Posted to commits@falcon.apache.org by pa...@apache.org on 2016/03/01 09:25:47 UTC

[01/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Repository: falcon
Updated Branches:
  refs/heads/asf-site 6f5b476cc -> 8e49379d2


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/SLA/FeedSLAMonitoringTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/SLA/FeedSLAMonitoringTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/SLA/FeedSLAMonitoringTest.java
deleted file mode 100644
index fa1f808..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/SLA/FeedSLAMonitoringTest.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.SLA;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.SchedulableEntityInstance;
-import org.apache.falcon.resource.SchedulableEntityInstanceResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.joda.time.DateTime;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.ArrayList;
-import java.util.Collections;
-
-
-/**
- * Feed SLA monitoring tests.
- * Test assumes the following properties are set in the server's startup.properties:
- *      *.feed.sla.statusCheck.frequency.seconds=60
- *      *.feed.sla.lookAheadWindow.millis=60000
- */
-@Test(groups = { "distributed", "embedded" })
-public class FeedSLAMonitoringTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private List<String> slaFeedNames;
-    private List<Frequency> slaFeedFrequencies;
-    private String clusterName;
-    private static final Logger LOGGER = Logger.getLogger(FeedSLAMonitoringTest.class);
-
-    private String startTime;
-    private String endTime;
-    private String slaStartTime;
-    private String slaEndTime;
-    private int noOfFeeds;
-    private int statusCheckFrequency;
-
-    private static final Comparator<SchedulableEntityInstance> DEPENDENCY_COMPARATOR =
-            new Comparator<SchedulableEntityInstance>() {
-                @Override
-                public int compare(SchedulableEntityInstance o1, SchedulableEntityInstance o2) {
-                    return o1.compareTo(o2);
-                }
-            };
-
-    /**
-     * Submits 3 feeds with different frequencies and SLA values.
-     * @throws Exception
-     */
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        clusterName = bundles[0].getClusterNames().get(0);
-        ServiceResponse response =
-                prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0));
-        AssertUtil.assertSucceeded(response);
-
-        startTime = TimeUtil.getTimeWrtSystemTime(-10);
-        endTime = TimeUtil.addMinsToTime(startTime, 20);
-        noOfFeeds = 3;
-
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        final String oldFeedName = bundles[0].getInputFeedNameFromBundle();
-        slaFeedFrequencies = Arrays.asList(new Frequency("1", Frequency.TimeUnit.minutes),
-                new Frequency("2", Frequency.TimeUnit.minutes),
-                new Frequency("4", Frequency.TimeUnit.minutes));
-
-        slaFeedNames = Arrays.asList(oldFeedName + "-1", oldFeedName + "-2", oldFeedName + "-3");
-
-        //Submit 3 feeds with different frequencies and sla values.
-        for (int bIndex = 0; bIndex < noOfFeeds; ++bIndex) {
-            final FeedMerlin ipFeed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-
-            ipFeed.setValidity(startTime, endTime);
-            ipFeed.setAvailabilityFlag("_SUCCESS");
-
-            //set slaLow and slaHigh
-            ipFeed.setSla(new Frequency("1", Frequency.TimeUnit.minutes),
-                    new Frequency("2", Frequency.TimeUnit.minutes));
-            ipFeed.setName(slaFeedNames.get(bIndex));
-            ipFeed.setFrequency(slaFeedFrequencies.get(bIndex));
-
-            LOGGER.info("Feed is : " + ipFeed.toString());
-
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(ipFeed.toString()));
-        }
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        cleanTestsDirs();
-        removeTestClassEntities();
-    }
-
-    /**
-     * The following test submits 3 feeds, checks the slaAlert for a given time range and validates its output.
- * It also checks the SLA status when a feed is deleted, and when data is created with or without the _SUCCESS folder.
-     * @throws Exception
-     */
-    @Test
-    public void feedSLATest() throws Exception {
-        /**TEST : Check sla response for a given time range
-         */
-
-        statusCheckFrequency = 60; // 60 seconds
-
-        // Map of instanceDate and corresponding list of SchedulableEntityInstance
-        Map<String, List<SchedulableEntityInstance>> instanceEntityMap = new HashMap<>();
-
-        slaStartTime = startTime;
-        slaEndTime = TimeUtil.addMinsToTime(slaStartTime, 10);
-        DateTime slaStartDate = TimeUtil.oozieDateToDate(slaStartTime);
-        DateTime slaEndDate = TimeUtil.oozieDateToDate(slaEndTime);
-
-        List<SchedulableEntityInstance> expectedInstances = new ArrayList<>();
-        SchedulableEntityInstance expectedSchedulableEntityInstance;
-
-        for (int index = 0; index < noOfFeeds; ++index) {
-
-            DateTime dt = new DateTime(slaStartDate);
-            while (!dt.isAfter(slaEndDate)) {
-
-                expectedSchedulableEntityInstance = new SchedulableEntityInstance(slaFeedNames.get(index),
-                        clusterName, dt.toDate(), EntityType.FEED);
-                expectedSchedulableEntityInstance.setTags("Missed SLA High");
-                expectedInstances.add(expectedSchedulableEntityInstance);
-
-                if (!instanceEntityMap.containsKey(dt.toString())) {
-                    instanceEntityMap.put(dt.toString(), new ArrayList<SchedulableEntityInstance>());
-                }
-                instanceEntityMap.get(dt.toString()).add(expectedSchedulableEntityInstance);
-                dt = dt.plusMinutes(slaFeedFrequencies.get(index).getFrequencyAsInt());
-
-            }
-        }
-
-        TimeUtil.sleepSeconds(statusCheckFrequency);
-
-        SchedulableEntityInstanceResult response = prism.getFeedHelper().getSlaAlert(
-                "?start=" + slaStartTime + "&end=" + slaEndTime).getSlaResult();
-
-        LOGGER.info(response.getMessage());
-
-        validateInstances(response, expectedInstances);
-
-        /**TEST : Create missing dependencies with _SUCCESS directory and check sla response
-         */
-
-        String dateEntry = (String) instanceEntityMap.keySet().toArray()[1];
-        LOGGER.info(dateEntry + "/" + instanceEntityMap.get(dateEntry));
-        List<String> dataDates = InstanceUtil.getMinuteDatesToPath(dateEntry, dateEntry, 0);
-
-        HadoopUtil.createFolders(clusterFS, baseTestHDFSDir + "/input/", dataDates);
-
-        //sla response for feeds when _SUCCESS file is missing from dataPath
-        response = prism.getFeedHelper().getSlaAlert("?start=" + slaStartTime + "&end=" + slaEndTime).getSlaResult();
-
-        // Response does not change as it checks for _SUCCESS file
-        validateInstances(response, expectedInstances);
-
-        //Create _SUCCESS file
-        HadoopUtil.recreateDir(clusterFS, baseTestHDFSDir + "/input/" + dataDates.get(0) + "/_SUCCESS");
-        for (SchedulableEntityInstance instance : instanceEntityMap.get(dateEntry)) {
-            expectedInstances.remove(instance);
-        }
-        instanceEntityMap.remove(dateEntry);
-
-        TimeUtil.sleepSeconds(statusCheckFrequency);
-
-        //sla response for feeds when _SUCCESS file is available in dataPath
-        response = prism.getFeedHelper().getSlaAlert("?start=" + slaStartTime + "&end=" + slaEndTime).getSlaResult();
-        validateInstances(response, expectedInstances);
-
-        /** TEST : Delete feed and check sla response
-         */
-        String deletedFeed = slaFeedNames.get(0);
-        prism.getFeedHelper().deleteByName(deletedFeed, null);
-
-        for (Map.Entry<String, List<SchedulableEntityInstance>> entry : instanceEntityMap.entrySet())
-        {
-            LOGGER.info(entry.getKey() + "/" + entry.getValue());
-            for (SchedulableEntityInstance instance : entry.getValue()) {
-                if (instance.getEntityName().equals(deletedFeed)) {
-                    expectedInstances.remove(instance);
-                }
-            }
-
-        }
-        TimeUtil.sleepSeconds(statusCheckFrequency);
-        response = prism.getFeedHelper().getSlaAlert("?start=" + slaStartTime + "&end=" + slaEndTime).getSlaResult();
-        validateInstances(response, expectedInstances);
-
-    }
-
-    /**
-     * Validates the expected response against the actual response.
-     * @param response SchedulableEntityInstanceResult response
-     * @param expectedInstances List of expected instances
-     */
-    private static void validateInstances(SchedulableEntityInstanceResult response,
-            List<SchedulableEntityInstance> expectedInstances) {
-
-        List<SchedulableEntityInstance> actualInstances = Arrays.asList(response.getInstances());
-
-        for (SchedulableEntityInstance instance : actualInstances) {
-            instance.setTags("Missed SLA High");
-        }
-
-        Collections.sort(expectedInstances, DEPENDENCY_COMPARATOR);
-        Collections.sort(actualInstances, DEPENDENCY_COMPARATOR);
-
-        Assert.assertEquals(actualInstances, expectedInstances, "Instances mismatch");
-    }
-}


[51/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
FALCON-1830 Removed code source directories and updated pom


Project: http://git-wip-us.apache.org/repos/asf/falcon/repo
Commit: http://git-wip-us.apache.org/repos/asf/falcon/commit/8e49379d
Tree: http://git-wip-us.apache.org/repos/asf/falcon/tree/8e49379d
Diff: http://git-wip-us.apache.org/repos/asf/falcon/diff/8e49379d

Branch: refs/heads/asf-site
Commit: 8e49379d2c72146e1eba45c08cc15b00e8c2b5e9
Parents: 6f5b476
Author: Pallavi Rao <pa...@inmobi.com>
Authored: Tue Mar 1 13:55:06 2016 +0530
Committer: Pallavi Rao <pa...@inmobi.com>
Committed: Tue Mar 1 13:55:06 2016 +0530

----------------------------------------------------------------------
 CHANGES.txt                                     | 1866 ----------
 acquisition/pom.xml                             |   41 -
 archival/pom.xml                                |   41 -
 build-tools/pom.xml                             |   67 -
 build-tools/src/bin/build-oozie.sh              |   74 -
 build-tools/src/patches/OOZIE-1551-4.0.patch    |   81 -
 build-tools/src/patches/OOZIE-1741.patch        |  397 ---
 .../src/patches/oozie-hadoop2-profile.patch     |   13 -
 build-tools/src/patches/oozie-site.patch        |  165 -
 cli/pom.xml                                     |  207 --
 .../falcon/cli/FalconCLIRuntimeException.java   |   37 -
 .../falcon/cli/commands/BaseFalconCommands.java |  136 -
 .../cli/commands/FalconConnectionCommands.java  |   56 -
 .../cli/commands/FalconEntityCommands.java      |   26 -
 .../cli/commands/FalconInstanceCommands.java    |   26 -
 .../apache/falcon/cli/skel/FalconBanner.java    |   61 -
 .../cli/skel/FalconHistoryFileProvider.java     |   46 -
 .../falcon/cli/skel/FalconPromptProvider.java   |   47 -
 .../META-INF/spring/spring-shell-plugin.xml     |   40 -
 client/pom.xml                                  |  161 -
 .../main/java/org/apache/falcon/LifeCycle.java  |   41 -
 .../src/main/java/org/apache/falcon/Pair.java   |   78 -
 .../java/org/apache/falcon/ResponseHelper.java  |  304 --
 client/src/main/java/org/apache/falcon/Tag.java |   39 -
 .../java/org/apache/falcon/cli/CLIParser.java   |  155 -
 .../org/apache/falcon/cli/FalconAdminCLI.java   |  109 -
 .../java/org/apache/falcon/cli/FalconCLI.java   |  331 --
 .../org/apache/falcon/cli/FalconEntityCLI.java  |  360 --
 .../apache/falcon/cli/FalconInstanceCLI.java    |  336 --
 .../apache/falcon/cli/FalconMetadataCLI.java    |  256 --
 .../org/apache/falcon/cli/FalconRecipeCLI.java  |  121 -
 .../falcon/client/AbstractFalconClient.java     |  466 ---
 .../falcon/client/FalconCLIException.java       |   69 -
 .../org/apache/falcon/client/FalconClient.java  | 1057 ------
 .../falcon/entity/v0/AccessControlList.java     |   40 -
 .../apache/falcon/entity/v0/DateValidator.java  |   81 -
 .../org/apache/falcon/entity/v0/Entity.java     |   98 -
 .../falcon/entity/v0/EntityNotification.java    |   35 -
 .../org/apache/falcon/entity/v0/EntityType.java |  117 -
 .../org/apache/falcon/entity/v0/Frequency.java  |  113 -
 .../apache/falcon/entity/v0/SchemaHelper.java   |   71 -
 .../falcon/metadata/RelationshipType.java       |   67 -
 .../recipe/HdfsReplicationRecipeTool.java       |   70 -
 .../HdfsReplicationRecipeToolOptions.java       |   62 -
 .../recipe/HiveReplicationRecipeTool.java       |  196 --
 .../HiveReplicationRecipeToolOptions.java       |   89 -
 .../java/org/apache/falcon/recipe/Recipe.java   |   29 -
 .../org/apache/falcon/recipe/RecipeFactory.java |   44 -
 .../org/apache/falcon/recipe/RecipeTool.java    |  285 --
 .../apache/falcon/recipe/RecipeToolArgs.java    |   71 -
 .../apache/falcon/recipe/RecipeToolOptions.java |   91 -
 .../recipe/util/RecipeProcessBuilderUtils.java  |  293 --
 .../org/apache/falcon/resource/APIResult.java   |  106 -
 .../org/apache/falcon/resource/EntityList.java  |  213 --
 .../falcon/resource/EntitySummaryResult.java    |  220 --
 .../falcon/resource/FeedInstanceResult.java     |  155 -
 .../falcon/resource/FeedLookupResult.java       |  172 -
 .../resource/InstanceDependencyResult.java      |   86 -
 .../apache/falcon/resource/InstancesResult.java |  260 --
 .../falcon/resource/InstancesSummaryResult.java |  114 -
 .../falcon/resource/LineageGraphResult.java     |  191 --
 .../resource/SchedulableEntityInstance.java     |  175 -
 .../SchedulableEntityInstanceResult.java        |   86 -
 .../apache/falcon/resource/TriageResult.java    |   87 -
 client/src/main/resources/cluster-0.1.xsd       |  211 --
 client/src/main/resources/datasource-0.1.xsd    |  276 --
 client/src/main/resources/feed-0.1.xsd          |  575 ----
 client/src/main/resources/jaxb-binding.xjb      |   72 -
 client/src/main/resources/mysql_database.xml    |   46 -
 client/src/main/resources/process-0.1.xsd       |  443 ---
 .../org/apache/falcon/cli/TestCLIParser.java    |   69 -
 .../falcon/entity/v0/DateValidatorTest.java     |   83 -
 .../apache/falcon/entity/v0/TestFrequency.java  |   37 -
 .../falcon/resource/LineageGraphResultTest.java |   50 -
 common/pom.xml                                  |  222 --
 .../java/org/apache/falcon/FalconException.java |   48 -
 .../apache/falcon/FalconRuntimException.java    |   43 -
 .../falcon/catalog/AbstractCatalogService.java  |  177 -
 .../apache/falcon/catalog/CatalogPartition.java |  179 -
 .../falcon/catalog/CatalogPartitionHandler.java |  313 --
 .../falcon/catalog/CatalogServiceFactory.java   |   57 -
 .../falcon/catalog/HiveCatalogService.java      |  425 ---
 .../falcon/cleanup/AbstractCleanupHandler.java  |  193 --
 .../falcon/cleanup/FeedCleanupHandler.java      |   48 -
 .../falcon/cleanup/ProcessCleanupHandler.java   |   48 -
 .../apache/falcon/entity/CatalogStorage.java    |  592 ----
 .../org/apache/falcon/entity/ClusterHelper.java |  195 --
 .../falcon/entity/ColoClusterRelation.java      |   89 -
 .../apache/falcon/entity/DatasourceHelper.java  |  248 --
 .../entity/EntityNotRegisteredException.java    |   31 -
 .../org/apache/falcon/entity/EntityUtil.java    | 1085 ------
 .../org/apache/falcon/entity/ExternalId.java    |   77 -
 .../org/apache/falcon/entity/FeedHelper.java    | 1292 -------
 .../falcon/entity/FeedInstanceStatus.java       |  148 -
 .../apache/falcon/entity/FileSystemStorage.java |  509 ---
 .../java/org/apache/falcon/entity/HiveUtil.java |   62 -
 .../org/apache/falcon/entity/ProcessHelper.java |  188 -
 .../java/org/apache/falcon/entity/Storage.java  |  116 -
 .../falcon/entity/WorkflowNameBuilder.java      |  157 -
 .../falcon/entity/common/FeedDataPath.java      |   89 -
 .../apache/falcon/entity/lock/MemoryLocks.java  |   79 -
 .../entity/parser/ClusterEntityParser.java      |  405 ---
 .../entity/parser/CrossEntityValidations.java   |  153 -
 .../entity/parser/DatasourceEntityParser.java   |  136 -
 .../falcon/entity/parser/EntityParser.java      |  159 -
 .../entity/parser/EntityParserFactory.java      |   55 -
 .../falcon/entity/parser/FeedEntityParser.java  |  719 ----
 .../entity/parser/ProcessEntityParser.java      |  369 --
 .../entity/parser/ValidationException.java      |   42 -
 .../falcon/entity/store/ConfigurationStore.java |  435 ---
 .../store/EntityAlreadyExistsException.java     |   39 -
 .../falcon/entity/store/FeedLocationStore.java  |  160 -
 .../falcon/entity/store/FeedPathStore.java      |   46 -
 .../entity/store/StoreAccessException.java      |   38 -
 .../apache/falcon/entity/v0/EntityGraph.java    |  255 --
 .../entity/v0/EntityIntegrityChecker.java       |   68 -
 .../entity/v0/UnschedulableEntityException.java |   43 -
 .../falcon/expression/ExpressionHelper.java     |  260 --
 .../java/org/apache/falcon/group/FeedGroup.java |  102 -
 .../org/apache/falcon/group/FeedGroupMap.java   |  124 -
 .../falcon/hadoop/HadoopClientFactory.java      |  274 --
 .../lifecycle/AbstractPolicyBuilderFactory.java |   30 -
 .../falcon/lifecycle/FeedLifecycleStage.java    |   37 -
 .../falcon/lifecycle/LifecyclePolicy.java       |   63 -
 .../apache/falcon/lifecycle/PolicyBuilder.java  |   37 -
 .../lifecycle/retention/AgeBasedDelete.java     |  130 -
 .../lifecycle/retention/RetentionPolicy.java    |   54 -
 .../EntityRelationshipGraphBuilder.java         |  514 ---
 .../org/apache/falcon/metadata/GraphUtils.java  |   84 -
 .../InstanceRelationshipGraphBuilder.java       |  381 ---
 .../falcon/metadata/MetadataMappingService.java |  338 --
 .../metadata/RelationshipGraphBuilder.java      |  223 --
 .../falcon/metadata/RelationshipLabel.java      |   57 -
 .../falcon/metadata/RelationshipProperty.java   |   64 -
 .../falcon/retention/EvictedInstanceSerDe.java  |  117 -
 .../apache/falcon/retention/EvictionHelper.java |   50 -
 .../AuthenticationInitializationService.java    |  163 -
 .../falcon/security/AuthorizationProvider.java  |   85 -
 .../security/CredentialProviderHelper.java      |   89 -
 .../org/apache/falcon/security/CurrentUser.java |  244 --
 .../security/DefaultAuthorizationProvider.java  |  335 --
 .../apache/falcon/security/SecurityUtil.java    |  137 -
 .../service/ConfigurationChangeListener.java    |   61 -
 .../apache/falcon/service/FalconService.java    |   33 -
 .../apache/falcon/service/GroupsService.java    |   67 -
 .../falcon/service/LifecyclePolicyMap.java      |   81 -
 .../falcon/service/LogCleanupService.java       |   93 -
 .../apache/falcon/service/ProxyUserService.java |  203 --
 .../falcon/service/ServiceInitializer.java      |   68 -
 .../org/apache/falcon/service/Services.java     |   86 -
 .../org/apache/falcon/update/UpdateHelper.java  |  132 -
 .../falcon/util/ApplicationProperties.java      |  181 -
 .../org/apache/falcon/util/BuildProperties.java |   55 -
 .../java/org/apache/falcon/util/DateUtil.java   |  102 -
 .../falcon/util/DeploymentProperties.java       |   55 -
 .../org/apache/falcon/util/DeploymentUtil.java  |   97 -
 .../apache/falcon/util/FalconRadixUtils.java    |  321 --
 .../org/apache/falcon/util/HadoopQueueUtil.java |  179 -
 .../org/apache/falcon/util/HdfsClassLoader.java |  161 -
 .../java/org/apache/falcon/util/RadixNode.java  |  150 -
 .../java/org/apache/falcon/util/RadixTree.java  |  432 ---
 .../org/apache/falcon/util/ReflectionUtils.java |   76 -
 .../falcon/util/ReplicationDistCpOption.java    |   43 -
 .../apache/falcon/util/RuntimeProperties.java   |  114 -
 .../apache/falcon/util/StartupProperties.java   |   55 -
 .../falcon/util/StateStoreProperties.java       |  114 -
 .../falcon/workflow/WorkflowEngineFactory.java  |  126 -
 .../falcon/workflow/WorkflowExecutionArgs.java  |  131 -
 .../workflow/WorkflowExecutionContext.java      |  522 ---
 .../workflow/WorkflowExecutionListener.java     |   62 -
 .../WorkflowJobEndNotificationService.java      |  312 --
 .../workflow/engine/AbstractWorkflowEngine.java |  120 -
 .../engine/WorkflowEngineActionListener.java    |   45 -
 .../util/OozieActionConfigurationHelper.java    |   80 -
 .../falcon/workflow/util/OozieConstants.java    |   33 -
 common/src/main/resources/log4j.xml             |   86 -
 common/src/main/resources/runtime.properties    |   54 -
 common/src/main/resources/startup.properties    |  306 --
 .../src/main/resources/statestore.credentials   |   22 -
 common/src/main/resources/statestore.properties |   45 -
 .../falcon/cleanup/LogCleanupServiceTest.java   |  170 -
 .../apache/falcon/entity/AbstractTestBase.java  |  211 --
 .../falcon/entity/CatalogStorageTest.java       |  213 --
 .../falcon/entity/ColoClusterRelationTest.java  |   73 -
 .../apache/falcon/entity/EntityTypeTest.java    |   60 -
 .../apache/falcon/entity/EntityUtilTest.java    |  453 ---
 .../apache/falcon/entity/FeedDataPathTest.java  |  124 -
 .../apache/falcon/entity/FeedHelperTest.java    | 1080 ------
 .../falcon/entity/FileSystemStorageTest.java    |  534 ---
 .../org/apache/falcon/entity/HiveUtilTest.java  |  103 -
 .../apache/falcon/entity/ProcessHelperTest.java |  265 --
 .../falcon/entity/StorageFactoryTest.java       |  306 --
 .../falcon/entity/TestWorkflowNameBuilder.java  |  109 -
 .../falcon/entity/lock/MemoryLocksTest.java     |   64 -
 .../entity/parser/ClusterEntityParserTest.java  |  459 ---
 .../parser/DatasourceEntityParserTest.java      |   95 -
 .../entity/parser/FeedEntityParserTest.java     | 1238 -------
 .../falcon/entity/parser/FeedUpdateTest.java    |  145 -
 .../entity/parser/ProcessEntityParserTest.java  |  632 ----
 .../entity/store/ConfigurationStoreTest.java    |  168 -
 .../entity/store/FeedLocationStoreTest.java     |  263 --
 .../falcon/entity/v0/EntityGraphTest.java       |  407 ---
 .../falcon/expression/ExpressionHelperTest.java |   84 -
 .../apache/falcon/group/FeedGroupMapTest.java   |  200 --
 .../falcon/hadoop/HadoopClientFactoryTest.java  |  106 -
 .../metadata/MetadataMappingServiceTest.java    | 1228 -------
 .../retention/EvictedInstanceSerDeTest.java     |   99 -
 ...AuthenticationInitializationServiceTest.java |  144 -
 .../apache/falcon/security/CurrentUserTest.java |  177 -
 .../DefaultAuthorizationProviderTest.java       |  403 ---
 .../falcon/security/SecurityUtilTest.java       |  162 -
 .../falcon/service/GroupsServiceTest.java       |   56 -
 .../falcon/service/ProxyUserServiceTest.java    |  167 -
 .../apache/falcon/update/UpdateHelperTest.java  |  323 --
 .../falcon/util/ApplicationPropertiesTest.java  |  111 -
 .../org/apache/falcon/util/FalconTestUtil.java  |   33 -
 .../apache/falcon/util/HadoopQueueUtilTest.java |   63 -
 .../org/apache/falcon/util/RadixNodeTest.java   |  109 -
 .../org/apache/falcon/util/RadixTreeTest.java   |  322 --
 .../apache/falcon/util/ReflectionUtilsTest.java |   49 -
 .../workflow/WorkflowExecutionContextTest.java  |  341 --
 .../WorkflowJobEndNotificationServiceTest.java  |  176 -
 common/src/test/resources/classpath.properties  |   21 -
 .../resources/config/cluster/cluster-0.1.xml    |   44 -
 .../config/cluster/cluster-bad-registry.xml     |   43 -
 .../cluster/cluster-bad-write-endpoint.xml      |   44 -
 .../config/cluster/cluster-no-messaging.xml     |   38 -
 .../config/cluster/cluster-no-registry.xml      |   43 -
 .../config/datasource/datasource-0.1.xml        |   48 -
 .../config/datasource/datasource-file-0.1.xml   |   48 -
 .../config/datasource/datasource-file-0.2.xml   |   48 -
 .../datasource/datasource-invalid-0.1.xml       |   46 -
 .../src/test/resources/config/feed/feed-0.1.xml |   70 -
 .../src/test/resources/config/feed/feed-0.2.xml |   63 -
 .../src/test/resources/config/feed/feed-0.3.xml |   83 -
 .../src/test/resources/config/feed/feed-0.4.xml |   74 -
 .../resources/config/feed/feed-export-0.1.xml   |   66 -
 .../feed/feed-export-exclude-fields-0.1.xml     |   66 -
 .../resources/config/feed/feed-import-0.1.xml   |   69 -
 .../feed/feed-import-exclude-fields-0.1.xml     |   74 -
 .../config/feed/feed-import-invalid-0.1.xml     |   73 -
 .../config/feed/feed-import-noargs-0.1.xml      |   64 -
 .../config/feed/feed-schedulerinfo-1.json       |  276 --
 .../config/feed/feed-schedulerinfo-2.json       |   19 -
 .../resources/config/feed/feed-tags-0.1.xml     |   60 -
 .../config/feed/hive-table-feed-out.xml         |   45 -
 .../resources/config/feed/hive-table-feed.xml   |   42 -
 .../test/resources/config/feed/invalid-feed.xml |   53 -
 .../feed/table-with-multiple-sources-feed.xml   |   53 -
 .../resources/config/process/process-0.1.xml    |   59 -
 .../resources/config/process/process-0.2.xml    |   58 -
 .../config/process/process-bad-pipeline.xml     |   56 -
 .../config/process/process-invalid.xml          |   38 -
 .../resources/config/process/process-table.xml  |   49 -
 common/src/test/resources/deploy.properties     |   19 -
 common/src/test/resources/runtime.properties    |   25 -
 distro/pom.xml                                  |   84 -
 docs/license/angular-ui-bootstrap-LICENSE.txt   |   21 -
 docs/license/angular-ui-router-LICENSE.txt      |   21 -
 docs/license/angularJS-LICENSE.txt              |   22 -
 docs/license/bootstrap-LICENSE.txt              |   11 -
 docs/license/d3-LICENSE.txt                     |   26 -
 docs/license/dagre-LICENSE.txt                  |   19 -
 docs/license/dust-LICENSE.txt                   |   19 -
 docs/license/dust-helpers-LICENSE.txt           |   19 -
 docs/license/entypo-font-LICENSE.txt            |   92 -
 docs/license/entypo-icons-LICENSE.txt           |  359 --
 docs/license/jasmine-LICENSE.txt                |   22 -
 docs/license/jquery-LICENSE.txt                 |   19 -
 docs/pom.xml                                    |   63 -
 docs/src/site/resources/Architecture.png        |  Bin 65687 -> 0 bytes
 docs/src/site/resources/EntityDependency.png    |  Bin 53036 -> 0 bytes
 docs/src/site/resources/FeedSchedule.png        |  Bin 84841 -> 0 bytes
 docs/src/site/resources/PrismSetup.png          |  Bin 103747 -> 0 bytes
 docs/src/site/resources/ProcessSchedule.png     |  Bin 85720 -> 0 bytes
 .../images/accessories-text-editor.png          |  Bin 746 -> 0 bytes
 docs/src/site/resources/images/add.gif          |  Bin 397 -> 0 bytes
 .../resources/images/apache-incubator-logo.png  |  Bin 4234 -> 0 bytes
 .../resources/images/apache-maven-project-2.png |  Bin 33442 -> 0 bytes
 .../images/application-certificate.png          |  Bin 923 -> 0 bytes
 docs/src/site/resources/images/contact-new.png  |  Bin 736 -> 0 bytes
 .../resources/images/document-properties.png    |  Bin 577 -> 0 bytes
 .../site/resources/images/drive-harddisk.png    |  Bin 700 -> 0 bytes
 docs/src/site/resources/images/falcon-logo.png  |  Bin 13293 -> 0 bytes
 docs/src/site/resources/images/fix.gif          |  Bin 366 -> 0 bytes
 .../site/resources/images/icon_error_sml.gif    |  Bin 633 -> 0 bytes
 .../src/site/resources/images/icon_help_sml.gif |  Bin 1072 -> 0 bytes
 .../src/site/resources/images/icon_info_sml.gif |  Bin 638 -> 0 bytes
 .../site/resources/images/icon_success_sml.gif  |  Bin 604 -> 0 bytes
 .../site/resources/images/icon_warning_sml.gif  |  Bin 625 -> 0 bytes
 .../site/resources/images/image-x-generic.png   |  Bin 662 -> 0 bytes
 .../resources/images/internet-web-browser.png   |  Bin 1017 -> 0 bytes
 .../images/logos/build-by-maven-black.png       |  Bin 2294 -> 0 bytes
 .../images/logos/build-by-maven-white.png       |  Bin 2260 -> 0 bytes
 .../resources/images/logos/maven-feather.png    |  Bin 3330 -> 0 bytes
 .../site/resources/images/network-server.png    |  Bin 536 -> 0 bytes
 .../site/resources/images/package-x-generic.png |  Bin 717 -> 0 bytes
 .../resources/images/profiles/pre-release.png   |  Bin 32607 -> 0 bytes
 .../site/resources/images/profiles/retired.png  |  Bin 22003 -> 0 bytes
 .../site/resources/images/profiles/sandbox.png  |  Bin 33010 -> 0 bytes
 docs/src/site/resources/images/remove.gif       |  Bin 607 -> 0 bytes
 docs/src/site/resources/images/rss.png          |  Bin 474 -> 0 bytes
 docs/src/site/resources/images/update.gif       |  Bin 1090 -> 0 bytes
 docs/src/site/resources/images/window-new.png   |  Bin 583 -> 0 bytes
 docs/src/site/site.xml                          |   62 -
 docs/src/site/twiki/Appendix.twiki              |   55 -
 docs/src/site/twiki/Configuration.twiki         |  122 -
 docs/src/site/twiki/Distributed-mode.twiki      |  198 --
 docs/src/site/twiki/Embedded-mode.twiki         |  198 --
 docs/src/site/twiki/EntitySpecification.twiki   |  996 ------
 docs/src/site/twiki/FalconCLI.twiki             |  540 ---
 docs/src/site/twiki/FalconDocumentation.twiki   |  777 -----
 .../site/twiki/FalconEmailNotification.twiki    |   29 -
 docs/src/site/twiki/FalconNativeScheduler.twiki |  213 --
 docs/src/site/twiki/HDFSDR.twiki                |   34 -
 docs/src/site/twiki/HiveDR.twiki                |   74 -
 docs/src/site/twiki/HiveIntegration.twiki       |  372 --
 docs/src/site/twiki/ImportExport.twiki          |  242 --
 docs/src/site/twiki/InstallationSteps.twiki     |   87 -
 docs/src/site/twiki/LICENSE.txt                 |    3 -
 docs/src/site/twiki/MigrationInstructions.twiki |   15 -
 docs/src/site/twiki/OnBoarding.twiki            |  269 --
 docs/src/site/twiki/Operability.twiki           |  110 -
 docs/src/site/twiki/Recipes.twiki               |   85 -
 docs/src/site/twiki/Security.twiki              |  387 ---
 docs/src/site/twiki/falconcli/CommonCLI.twiki   |   21 -
 .../site/twiki/falconcli/ContinueInstance.twiki |    8 -
 docs/src/site/twiki/falconcli/Definition.twiki  |    8 -
 .../src/site/twiki/falconcli/DeleteEntity.twiki |    8 -
 .../site/twiki/falconcli/DependencyEntity.twiki |   10 -
 .../twiki/falconcli/DependencyInstance.twiki    |   33 -
 .../src/site/twiki/falconcli/EdgeMetadata.twiki |   11 -
 docs/src/site/twiki/falconcli/FalconCLI.twiki   |  112 -
 .../twiki/falconcli/FeedInstanceListing.twiki   |   11 -
 docs/src/site/twiki/falconcli/HelpAdmin.twiki   |    6 -
 .../src/site/twiki/falconcli/KillInstance.twiki |   14 -
 .../twiki/falconcli/LifeCycleInstance.twiki     |    9 -
 .../site/twiki/falconcli/LineageMetadata.twiki  |   12 -
 docs/src/site/twiki/falconcli/ListEntity.twiki  |   17 -
 .../src/site/twiki/falconcli/ListInstance.twiki |   20 -
 .../src/site/twiki/falconcli/ListMetadata.twiki |   13 -
 .../src/site/twiki/falconcli/LogsInstance.twiki |   14 -
 docs/src/site/twiki/falconcli/Lookup.twiki      |   12 -
 .../site/twiki/falconcli/ParamsInstance.twiki   |    8 -
 .../site/twiki/falconcli/RelationMetadata.twiki |   10 -
 .../site/twiki/falconcli/RerunInstance.twiki    |   10 -
 .../src/site/twiki/falconcli/ResumeEntity.twiki |    8 -
 .../site/twiki/falconcli/ResumeInstance.twiki   |    8 -
 .../site/twiki/falconcli/RunningInstance.twiki  |   13 -
 docs/src/site/twiki/falconcli/SLAAlert.twiki    |   49 -
 docs/src/site/twiki/falconcli/Schedule.twiki    |   22 -
 docs/src/site/twiki/falconcli/StatusAdmin.twiki |    8 -
 .../src/site/twiki/falconcli/StatusEntity.twiki |    8 -
 .../site/twiki/falconcli/StatusInstance.twiki   |   21 -
 docs/src/site/twiki/falconcli/Submit.twiki      |   13 -
 .../src/site/twiki/falconcli/SubmitRecipe.twiki |   17 -
 .../site/twiki/falconcli/SummaryEntity.twiki    |   14 -
 .../site/twiki/falconcli/SummaryInstance.twiki  |   20 -
 .../site/twiki/falconcli/SuspendEntity.twiki    |    8 -
 .../site/twiki/falconcli/SuspendInstance.twiki  |    8 -
 docs/src/site/twiki/falconcli/Touch.twiki       |   10 -
 .../site/twiki/falconcli/TriageInstance.twiki   |    9 -
 .../src/site/twiki/falconcli/UpdateEntity.twiki |   14 -
 .../src/site/twiki/falconcli/VersionAdmin.twiki |    7 -
 .../twiki/falconcli/VertexEdgesMetadata.twiki   |   12 -
 .../site/twiki/falconcli/VertexMetadata.twiki   |   11 -
 .../site/twiki/falconcli/VerticesMetadata.twiki |   11 -
 docs/src/site/twiki/index.twiki                 |   43 -
 .../site/twiki/restapi/AdjacentVertices.twiki   |   91 -
 docs/src/site/twiki/restapi/AdminConfig.twiki   |   35 -
 docs/src/site/twiki/restapi/AdminStack.twiki    |   40 -
 docs/src/site/twiki/restapi/AdminVersion.twiki  |   35 -
 docs/src/site/twiki/restapi/AllEdges.twiki      |   42 -
 docs/src/site/twiki/restapi/AllVertices.twiki   |   43 -
 docs/src/site/twiki/restapi/Edge.twiki          |   34 -
 .../site/twiki/restapi/EntityDefinition.twiki   |   53 -
 docs/src/site/twiki/restapi/EntityDelete.twiki  |   31 -
 .../site/twiki/restapi/EntityDependencies.twiki |   43 -
 docs/src/site/twiki/restapi/EntityLineage.twiki |   40 -
 docs/src/site/twiki/restapi/EntityList.twiki    |  164 -
 docs/src/site/twiki/restapi/EntityResume.twiki  |   30 -
 .../src/site/twiki/restapi/EntitySchedule.twiki |  100 -
 docs/src/site/twiki/restapi/EntityStatus.twiki  |   30 -
 docs/src/site/twiki/restapi/EntitySubmit.twiki  |  105 -
 .../twiki/restapi/EntitySubmitAndSchedule.twiki |   64 -
 docs/src/site/twiki/restapi/EntitySummary.twiki |   74 -
 docs/src/site/twiki/restapi/EntitySuspend.twiki |   30 -
 docs/src/site/twiki/restapi/EntityTouch.twiki   |   31 -
 docs/src/site/twiki/restapi/EntityUpdate.twiki  |   66 -
 .../src/site/twiki/restapi/EntityValidate.twiki |  170 -
 .../twiki/restapi/FeedInstanceListing.twiki     |   46 -
 docs/src/site/twiki/restapi/FeedLookup.twiki    |   37 -
 docs/src/site/twiki/restapi/FeedSLA.twiki       |   56 -
 docs/src/site/twiki/restapi/Graph.twiki         |   22 -
 .../twiki/restapi/InstanceDependencies.twiki    |   49 -
 docs/src/site/twiki/restapi/InstanceKill.twiki  |   44 -
 docs/src/site/twiki/restapi/InstanceList.twiki  |  151 -
 docs/src/site/twiki/restapi/InstanceLogs.twiki  |  113 -
 .../src/site/twiki/restapi/InstanceParams.twiki |   83 -
 docs/src/site/twiki/restapi/InstanceRerun.twiki |   66 -
 .../src/site/twiki/restapi/InstanceResume.twiki |   43 -
 .../site/twiki/restapi/InstanceRunning.twiki    |   84 -
 .../src/site/twiki/restapi/InstanceStatus.twiki |   98 -
 .../site/twiki/restapi/InstanceSummary.twiki    |  114 -
 .../site/twiki/restapi/InstanceSuspend.twiki    |   44 -
 docs/src/site/twiki/restapi/MetadataList.twiki  |   31 -
 .../site/twiki/restapi/MetadataRelations.twiki  |   46 -
 docs/src/site/twiki/restapi/ResourceList.twiki  |   93 -
 docs/src/site/twiki/restapi/Triage.twiki        |   45 -
 docs/src/site/twiki/restapi/Vertex.twiki        |   36 -
 .../site/twiki/restapi/VertexProperties.twiki   |   34 -
 docs/src/site/twiki/restapi/Vertices.twiki      |   38 -
 falcon-regression/.gitignore                    |   59 -
 falcon-regression/CHANGES.txt                   |  598 ----
 falcon-regression/README.md                     |  291 --
 falcon-regression/merlin-core/pom.xml           |  249 --
 .../regression/Entities/ClusterMerlin.java      |  325 --
 .../falcon/regression/Entities/FeedMerlin.java  |  537 ---
 .../regression/Entities/ProcessMerlin.java      |  691 ----
 .../regression/Entities/RecipeMerlin.java       |  366 --
 .../regression/Entities/TestEntityUtil.java     |   64 -
 .../falcon/regression/core/bundle/Bundle.java   |  927 -----
 .../core/enumsAndConstants/FreqType.java        |  126 -
 .../core/enumsAndConstants/MerlinConstants.java |  124 -
 .../core/enumsAndConstants/ResponseErrors.java  |   43 -
 .../core/enumsAndConstants/RetentionUnit.java   |   64 -
 .../regression/core/helpers/ColoHelper.java     |   54 -
 .../core/helpers/FalconClientBuilder.java       |  190 --
 .../regression/core/helpers/LineageHelper.java  |  328 --
 .../helpers/entity/AbstractEntityHelper.java    |  733 ----
 .../helpers/entity/ClusterEntityHelper.java     |  119 -
 .../helpers/entity/EntityHelperFactory.java     |   40 -
 .../core/helpers/entity/FeedEntityHelper.java   |   44 -
 .../helpers/entity/ProcessEntityHelper.java     |   40 -
 .../core/response/ServiceResponse.java          |  134 -
 .../core/response/lineage/Direction.java        |   45 -
 .../regression/core/response/lineage/Edge.java  |  103 -
 .../core/response/lineage/EdgeResult.java       |   34 -
 .../core/response/lineage/EdgesResult.java      |   51 -
 .../core/response/lineage/GraphEntity.java      |   32 -
 .../core/response/lineage/GraphResult.java      |   32 -
 .../core/response/lineage/NODE_TYPE.java        |   27 -
 .../core/response/lineage/Vertex.java           |  172 -
 .../core/response/lineage/VertexIdsResult.java  |   35 -
 .../core/response/lineage/VertexResult.java     |   28 -
 .../core/response/lineage/VerticesResult.java   |   66 -
 .../regression/core/supportClasses/Brother.java |  112 -
 .../core/supportClasses/ExecResult.java         |   70 -
 .../core/supportClasses/HadoopFileEditor.java   |  114 -
 .../core/supportClasses/JmsMessageConsumer.java |   97 -
 .../core/supportClasses/NotifyingAssert.java    |   76 -
 .../falcon/regression/core/util/AssertUtil.java |  519 ---
 .../falcon/regression/core/util/BundleUtil.java |  234 --
 .../regression/core/util/CleanupUtil.java       |  107 -
 .../falcon/regression/core/util/Config.java     |   81 -
 .../regression/core/util/EntityLineageUtil.java |  151 -
 .../falcon/regression/core/util/ExecUtil.java   |  211 --
 .../falcon/regression/core/util/FileUtil.java   |   48 -
 .../falcon/regression/core/util/Generator.java  |   48 -
 .../regression/core/util/GraphAssert.java       |  173 -
 .../falcon/regression/core/util/HCatUtil.java   |   79 -
 .../falcon/regression/core/util/HadoopUtil.java |  569 ----
 .../falcon/regression/core/util/HiveAssert.java |  269 --
 .../falcon/regression/core/util/HiveUtil.java   |  156 -
 .../regression/core/util/InstanceUtil.java      |  855 -----
 .../regression/core/util/KerberosHelper.java    |   52 -
 .../falcon/regression/core/util/LogUtil.java    |  344 --
 .../falcon/regression/core/util/MatrixUtil.java |   89 -
 .../falcon/regression/core/util/OSUtil.java     |   56 -
 .../falcon/regression/core/util/OozieUtil.java  |  855 -----
 .../falcon/regression/core/util/TimeUtil.java   |  301 --
 .../falcon/regression/core/util/UIAssert.java   |   51 -
 .../falcon/regression/core/util/UiUtil.java     |  106 -
 .../falcon/regression/core/util/Util.java       |  607 ----
 .../falcon/regression/core/util/XmlUtil.java    |   46 -
 .../org/apache/falcon/request/BaseRequest.java  |  214 --
 .../org/apache/falcon/request/RequestKeys.java  |   36 -
 .../security/FalconAuthorizationToken.java      |  115 -
 falcon-regression/merlin/pom.xml                |   94 -
 .../regression/testHelper/BaseTestClass.java    |  145 -
 .../regression/testHelper/BaseUITestClass.java  |   70 -
 .../regression/ui/pages/EntitiesPage.java       |  122 -
 .../falcon/regression/ui/pages/EntityPage.java  |   81 -
 .../apache/falcon/regression/ui/pages/Page.java |  163 -
 .../falcon/regression/ui/pages/ProcessPage.java |  345 --
 .../ui/search/AbstractSearchPage.java           |  268 --
 .../regression/ui/search/ClusterWizardPage.java |  556 ---
 .../falcon/regression/ui/search/EntityPage.java |  692 ----
 .../regression/ui/search/EntityWizardPage.java  |   94 -
 .../regression/ui/search/FeedWizardPage.java    |  652 ----
 .../regression/ui/search/InstancePage.java      |  115 -
 .../falcon/regression/ui/search/LoginPage.java  |  104 -
 .../regression/ui/search/MirrorWizardPage.java  |  517 ---
 .../falcon/regression/ui/search/PageHeader.java |  310 --
 .../regression/ui/search/ProcessWizardPage.java |  960 ------
 .../falcon/regression/ui/search/SearchPage.java |  456 ---
 .../src/main/resources/errorMapping.properties  |   26 -
 .../src/main/resources/log4testng.properties    |   29 -
 .../falcon/regression/AuthorizationTest.java    |  772 -----
 .../falcon/regression/CombinedActionsTest.java  |  217 --
 .../regression/ELExpCurrentAndLastWeekTest.java |  179 -
 .../regression/ELExpFutureAndLatestTest.java    |  114 -
 .../falcon/regression/ELValidationsTest.java    |  283 --
 .../regression/EmbeddedPigScriptTest.java       |  164 -
 .../falcon/regression/ExternalFSTest.java       |  219 --
 .../regression/FeedClusterUpdateTest.java       |  678 ----
 .../regression/FeedInstanceListingTest.java     |  261 --
 .../regression/FeedInstanceStatusTest.java      |  234 --
 .../falcon/regression/FeedLateRerunTest.java    |  230 --
 .../falcon/regression/FeedReplicationTest.java  |  581 ----
 .../falcon/regression/FeedResumeTest.java       |  118 -
 .../apache/falcon/regression/FeedSLATest.java   |  215 --
 .../falcon/regression/FeedScheduleTest.java     |  139 -
 .../falcon/regression/FeedStatusTest.java       |  167 -
 .../regression/FeedSubmitAndScheduleTest.java   |  175 -
 .../falcon/regression/FeedSubmitTest.java       |  163 -
 .../falcon/regression/FeedSuspendTest.java      |  141 -
 .../falcon/regression/InstanceParamTest.java    |  165 -
 .../falcon/regression/InstanceSummaryTest.java  |  267 --
 .../apache/falcon/regression/LogMoverTest.java  |  146 -
 .../apache/falcon/regression/NewRetryTest.java  | 1182 -------
 .../falcon/regression/NoOutputProcessTest.java  |  145 -
 .../falcon/regression/ProcessFrequencyTest.java |  152 -
 .../ProcessInstanceColoMixedTest.java           |  240 --
 .../regression/ProcessInstanceKillsTest.java    |  369 --
 .../regression/ProcessInstanceRerunTest.java    |  449 ---
 .../regression/ProcessInstanceResumeTest.java   |  293 --
 .../regression/ProcessInstanceRunningTest.java  |  188 -
 .../regression/ProcessInstanceStatusTest.java   |  495 ---
 .../regression/ProcessInstanceSuspendTest.java  |  268 --
 .../falcon/regression/ProcessLateRerunTest.java |  352 --
 .../regression/ProcessLibPathLoadTest.java      |  180 -
 .../falcon/regression/ProcessLibPathTest.java   |  131 -
 .../falcon/regression/ProcessSLATest.java       |  174 -
 .../falcon/regression/ProcessUpdateTest.java    |  110 -
 .../regression/SLA/FeedSLAMonitoringTest.java   |  258 --
 .../apache/falcon/regression/SearchApiTest.java |  498 ---
 .../falcon/regression/TestngListener.java       |  171 -
 .../regression/TouchAPIPrismAndServerTest.java  |  185 -
 .../ValidateAPIPrismAndServerTest.java          |  262 --
 .../entity/EntitiesPatternSearchTest.java       |  197 --
 .../regression/entity/ListEntitiesTest.java     |  307 --
 .../regression/hcat/HCatFeedOperationsTest.java |  242 --
 .../falcon/regression/hcat/HCatProcessTest.java |  638 ----
 .../regression/hcat/HCatReplicationTest.java    |  351 --
 .../regression/hcat/HCatRetentionTest.java      |  294 --
 .../regression/hive/dr/HdfsRecipeTest.java      |  131 -
 .../falcon/regression/hive/dr/HiveDRTest.java   |  736 ----
 .../falcon/regression/hive/dr/HiveDbDRTest.java |  279 --
 .../regression/hive/dr/HiveObjectCreator.java   |  208 --
 .../regression/hive/dr/RecipeExecLocation.java  |   63 -
 .../regression/lineage/EntityLineageTest.java   |  221 --
 .../regression/lineage/EntitySummaryTest.java   |  423 ---
 .../lineage/LineageApiProcessInstanceTest.java  |  227 --
 .../regression/lineage/LineageApiTest.java      |  655 ----
 .../lineage/ListFeedInstancesTest.java          |  493 ---
 .../lineage/ListProcessInstancesTest.java       |  374 --
 .../regression/lineage/ProcessPipelineTest.java |  178 -
 .../nativeScheduler/NativeScheduleTest.java     |  234 --
 .../regression/prism/EntityDryRunTest.java      |  149 -
 .../falcon/regression/prism/FeedDelayTest.java  |  233 --
 .../prism/NewPrismProcessUpdateTest.java        | 1643 ---------
 .../regression/prism/OptionalInputTest.java     |  346 --
 .../prism/PrismClusterDeleteTest.java           |  432 ---
 .../prism/PrismConcurrentRequestTest.java       |  270 --
 .../regression/prism/PrismFeedDeleteTest.java   | 1107 ------
 .../prism/PrismFeedLateReplicationTest.java     |  642 ----
 .../PrismFeedReplicationPartitionExpTest.java   |  868 -----
 .../prism/PrismFeedReplicationUpdateTest.java   |  304 --
 .../regression/prism/PrismFeedResumeTest.java   |  314 --
 .../regression/prism/PrismFeedScheduleTest.java |   91 -
 .../regression/prism/PrismFeedSnSTest.java      |  448 ---
 .../regression/prism/PrismFeedSuspendTest.java  |  312 --
 .../regression/prism/PrismFeedUpdateTest.java   |  268 --
 .../prism/PrismProcessDeleteTest.java           |  965 ------
 .../prism/PrismProcessResumeTest.java           |  289 --
 .../prism/PrismProcessScheduleTest.java         |  401 ---
 .../regression/prism/PrismProcessSnSTest.java   |  269 --
 .../prism/PrismProcessSuspendTest.java          |  273 --
 .../regression/prism/PrismSubmitTest.java       |  566 ---
 .../prism/ProcessPartitionExpVariableTest.java  |  141 -
 .../prism/RescheduleKilledProcessTest.java      |  125 -
 .../RescheduleProcessInFinalStatesTest.java     |  200 --
 .../falcon/regression/prism/RetentionTest.java  |  572 ----
 .../prism/UpdateAtSpecificTimeTest.java         |  620 ----
 .../regression/searchUI/ClusterSetupTest.java   |  434 ---
 .../searchUI/EntitiesTableReflectionTest.java   |  203 --
 .../regression/searchUI/EntitiesTableTest.java  |  166 -
 .../regression/searchUI/EntityPageTest.java     |  605 ----
 .../regression/searchUI/FeedSetupTest.java      |  999 ------
 .../regression/searchUI/HomePageTest.java       |  209 --
 .../regression/searchUI/InstancePageTest.java   |  188 -
 .../falcon/regression/searchUI/LoginTest.java   |  111 -
 .../searchUI/MirrorSourceTargetOptionsTest.java |  206 --
 .../regression/searchUI/MirrorSummaryTest.java  |  207 --
 .../falcon/regression/searchUI/MirrorTest.java  |  414 ---
 .../regression/searchUI/ProcessSetupTest.java   | 1082 ------
 .../regression/security/AclValidationTest.java  |  135 -
 .../regression/security/ClusterAclTest.java     |  146 -
 .../falcon/regression/security/EntityOp.java    |  274 --
 .../regression/security/FalconClientTest.java   |   96 -
 .../falcon/regression/security/FeedAclTest.java |  221 --
 .../regression/security/ProcessAclTest.java     |  235 --
 .../triage/FeedInstanceDependencyTest.java      |  221 --
 .../triage/PipelineInstanceDependencyTest.java  |  335 --
 .../triage/ProcessInstanceDependencyTest.java   |  204 --
 .../triage/TriageAPISingleColoTest.java         |  447 ---
 .../falcon/regression/ui/LineageGraphTest.java  |  370 --
 .../falcon/regression/ui/ProcessUITest.java     |  229 --
 .../resources/AvailabilityBundle/depends.txt    |   19 -
 .../valid/bundle1/cluster-0.1.xml               |   41 -
 .../valid/bundle1/feed-template1.xml            |   51 -
 .../valid/bundle1/feed-template2.xml            |   51 -
 .../valid/bundle1/process-agg.xml               |   51 -
 .../src/test/resources/ELbundle/cluster-0.1.xml |   41 -
 .../test/resources/ELbundle/feed-template1.xml  |   52 -
 .../test/resources/ELbundle/feed-template2.xml  |   52 -
 .../src/test/resources/ELbundle/process-agg.xml |   51 -
 .../FeedReplicationBundles/InputFeed.xml        |   47 -
 .../FeedReplicationBundles/cluster-0.1.xml      |   38 -
 .../HdfsRecipe/hdfs-replication-template.xml    |   44 -
 .../HdfsRecipe/hdfs-replication-workflow.xml    |   82 -
 .../HdfsRecipe/hdfs-replication.properties      |   79 -
 .../hive-disaster-recovery-template.xml         |   45 -
 .../hive-disaster-recovery-workflow.xml         |  249 --
 .../hive-disaster-recovery.properties           |   97 -
 .../hive-disaster-recovery-secure-template.xml  |   45 -
 .../hive-disaster-recovery-secure-workflow.xml  |  355 --
 .../hive-disaster-recovery-secure.properties    |  106 -
 .../valid1/bundle1/cluster-0.1.xml              |   41 -
 .../valid1/bundle1/feed-template1.xml           |   51 -
 .../valid1/bundle1/feed-template2.xml           |   51 -
 .../valid1/bundle1/process-agg.xml              |   47 -
 .../merlin/src/test/resources/LogMover/id.pig   |   21 -
 .../src/test/resources/LogMover/workflow.xml    |   39 -
 .../MultipleActionWorkflow/workflow.xml         |   75 -
 .../2ndLateData/dataFile1.txt                   |   73 -
 .../2ndLateData/dataFile2.txt                   |   87 -
 .../2ndLateData/dataFile3.txt                   |   36 -
 .../SingleFile/minimumText.txt                  |   17 -
 .../OozieExampleInputData/normalInput/_SUCCESS  |   19 -
 .../normalInput/dataFile.properties             |   30 -
 .../normalInput/dataFile.xml                    |   63 -
 .../normalInput/dataFile1.txt                   |   61 -
 .../normalInput/dataFile2.txt                   |  412 ---
 .../normalInput/dataFile3.txt                   |  139 -
 .../normalInput/dataFile4.txt                   |  174 -
 .../valid/bundle1/cluster-0.1.xml               |   41 -
 .../valid/bundle1/feed-hdfsoutputdir.xml        |   40 -
 .../valid/bundle1/feed-inpath.xml               |   59 -
 .../valid/bundle1/feed-interpath.xml            |   40 -
 .../RetentionBundles/valid/bundle1/process.xml  |   50 -
 .../RetryTests/valid1/bundle1/cluster-0.1.xml   |   41 -
 .../valid1/bundle1/feed-template1.xml           |   50 -
 .../valid1/bundle1/feed-template2.xml           |   51 -
 .../RetryTests/valid1/bundle1/process-agg.xml   |   49 -
 .../src/test/resources/availabilityFlag.txt     |    0
 .../resources/combinedActions/cluster-0.1.xml   |   48 -
 .../combinedActions/feed-template1.xml          |   51 -
 .../combinedActions/feed-template1_hcat.xml     |   41 -
 .../combinedActions/feed-template2.xml          |   51 -
 .../combinedActions/feed-template2_hcat.xml     |   47 -
 .../combinedActions/feed-template3.xml          |   51 -
 .../resources/combinedActions/process-agg.xml   |   51 -
 .../src/test/resources/combinedWorkflow/id.pig  |   20 -
 .../test/resources/combinedWorkflow/script.hql  |   19 -
 .../resources/combinedWorkflow/workflow.xml     |  109 -
 .../src/test/resources/hcat/cluster-0.1.xml     |   48 -
 .../src/test/resources/hcat/data/data.txt       |    3 -
 .../src/test/resources/hcat/feed-template1.xml  |   44 -
 .../src/test/resources/hcat/feed-template2.xml  |   48 -
 .../src/test/resources/hcat/hcat-process.xml    |   47 -
 .../test/resources/hcat/hivescript/script.hql   |   19 -
 .../hcat/hivescript/script_non_hcat_input.hql   |   27 -
 .../hcat/hivescript/script_non_hcat_output.hql  |   20 -
 .../script_one_hcat_input_two_hcat_output.hql   |   22 -
 .../script_two_hcat_input_one_hcat_output.hql   |   21 -
 .../script_two_hcat_input_two_hcat_output.hql   |   24 -
 .../src/test/resources/hcat_2/cluster-0.1.xml   |   48 -
 .../test/resources/hcat_2/feed-template1.xml    |   44 -
 .../test/resources/hcat_2/feed-template2.xml    |   48 -
 .../src/test/resources/hcat_2/process-agg.xml   |   52 -
 .../merlin/src/test/resources/log4j.xml         |   36 -
 .../src/test/resources/oozie/workflow.xml       |   61 -
 .../src/test/resources/oozieLib/workflow.xml    |   61 -
 .../merlin/src/test/resources/pig/id.pig        |   20 -
 .../src/test/resources/sleep/workflow.xml       |   85 -
 .../test/resources/updateBundle/cluster-0.1.xml |   41 -
 .../resources/updateBundle/feed-template1.xml   |   52 -
 .../resources/updateBundle/feed-template2.xml   |   52 -
 .../test/resources/updateBundle/process-agg.xml |   52 -
 .../workflows/aggregatorNoOutput/workflow.xml   |   32 -
 falcon-regression/pom.xml                       |  487 ---
 falcon-ui/Gruntfile.js                          |  272 --
 falcon-ui/Installation-steps.txt                |   47 -
 falcon-ui/README.md                             |   40 -
 falcon-ui/app/SpecRunner.html                   |   58 -
 falcon-ui/app/config/loginData.json             |    5 -
 falcon-ui/app/css/bootstrap/less/.csscomb.json  |  297 --
 falcon-ui/app/css/bootstrap/less/.csslintrc     |   19 -
 falcon-ui/app/css/bootstrap/less/alerts.less    |   68 -
 falcon-ui/app/css/bootstrap/less/badges.less    |   55 -
 falcon-ui/app/css/bootstrap/less/bootstrap.less |   51 -
 .../app/css/bootstrap/less/breadcrumbs.less     |   26 -
 .../app/css/bootstrap/less/button-groups.less   |  240 --
 falcon-ui/app/css/bootstrap/less/buttons.less   |  157 -
 falcon-ui/app/css/bootstrap/less/carousel.less  |  243 --
 falcon-ui/app/css/bootstrap/less/close.less     |   33 -
 falcon-ui/app/css/bootstrap/less/code.less      |   68 -
 .../bootstrap/less/component-animations.less    |   31 -
 falcon-ui/app/css/bootstrap/less/dropdowns.less |  215 --
 falcon-ui/app/css/bootstrap/less/forms.less     |  540 ---
 .../app/css/bootstrap/less/glyphicons.less      |  305 --
 falcon-ui/app/css/bootstrap/less/grid.less      |   84 -
 .../app/css/bootstrap/less/input-groups.less    |  166 -
 falcon-ui/app/css/bootstrap/less/jumbotron.less |   48 -
 falcon-ui/app/css/bootstrap/less/labels.less    |   64 -
 .../app/css/bootstrap/less/list-group.less      |  131 -
 falcon-ui/app/css/bootstrap/less/media.less     |   56 -
 falcon-ui/app/css/bootstrap/less/mixins.less    |   39 -
 .../app/css/bootstrap/less/mixins/alerts.less   |   14 -
 .../less/mixins/background-variant.less         |    8 -
 .../bootstrap/less/mixins/border-radius.less    |   18 -
 .../app/css/bootstrap/less/mixins/buttons.less  |   50 -
 .../css/bootstrap/less/mixins/center-block.less |    7 -
 .../app/css/bootstrap/less/mixins/clearfix.less |   22 -
 .../app/css/bootstrap/less/mixins/forms.less    |   81 -
 .../css/bootstrap/less/mixins/gradients.less    |   59 -
 .../bootstrap/less/mixins/grid-framework.less   |   91 -
 .../app/css/bootstrap/less/mixins/grid.less     |  122 -
 .../css/bootstrap/less/mixins/hide-text.less    |   21 -
 .../app/css/bootstrap/less/mixins/image.less    |   34 -
 .../app/css/bootstrap/less/mixins/labels.less   |   12 -
 .../css/bootstrap/less/mixins/list-group.less   |   29 -
 .../css/bootstrap/less/mixins/nav-divider.less  |   10 -
 .../less/mixins/nav-vertical-align.less         |    9 -
 .../app/css/bootstrap/less/mixins/opacity.less  |    8 -
 .../css/bootstrap/less/mixins/pagination.less   |   23 -
 .../app/css/bootstrap/less/mixins/panels.less   |   24 -
 .../css/bootstrap/less/mixins/progress-bar.less |   10 -
 .../css/bootstrap/less/mixins/reset-filter.less |    8 -
 .../app/css/bootstrap/less/mixins/resize.less   |    6 -
 .../less/mixins/responsive-visibility.less      |   15 -
 .../app/css/bootstrap/less/mixins/size.less     |   10 -
 .../css/bootstrap/less/mixins/tab-focus.less    |    9 -
 .../css/bootstrap/less/mixins/table-row.less    |   28 -
 .../bootstrap/less/mixins/text-emphasis.less    |    8 -
 .../bootstrap/less/mixins/text-overflow.less    |    8 -
 .../bootstrap/less/mixins/vendor-prefixes.less  |  224 --
 falcon-ui/app/css/bootstrap/less/modals.less    |  150 -
 falcon-ui/app/css/bootstrap/less/navbar.less    |  655 ----
 falcon-ui/app/css/bootstrap/less/navs.less      |  242 --
 falcon-ui/app/css/bootstrap/less/normalize.less |  425 ---
 falcon-ui/app/css/bootstrap/less/pager.less     |   55 -
 .../app/css/bootstrap/less/pagination.less      |   88 -
 falcon-ui/app/css/bootstrap/less/panels.less    |  243 --
 falcon-ui/app/css/bootstrap/less/popovers.less  |  133 -
 falcon-ui/app/css/bootstrap/less/print.less     |  101 -
 .../app/css/bootstrap/less/progress-bars.less   |  105 -
 .../css/bootstrap/less/responsive-embed.less    |   34 -
 .../bootstrap/less/responsive-utilities.less    |  194 --
 .../app/css/bootstrap/less/scaffolding.less     |  150 -
 falcon-ui/app/css/bootstrap/less/tables.less    |  233 --
 falcon-ui/app/css/bootstrap/less/theme.less     |  258 --
 .../app/css/bootstrap/less/thumbnails.less      |   36 -
 falcon-ui/app/css/bootstrap/less/tooltip.less   |   95 -
 falcon-ui/app/css/bootstrap/less/type.less      |  313 --
 falcon-ui/app/css/bootstrap/less/utilities.less |   57 -
 falcon-ui/app/css/bootstrap/less/variables.less |  846 -----
 falcon-ui/app/css/bootstrap/less/wells.less     |   29 -
 falcon-ui/app/css/fonts/Cabin-Bold-webfont.eot  |  Bin 24071 -> 0 bytes
 falcon-ui/app/css/fonts/Cabin-Bold-webfont.svg  | 1833 ----------
 falcon-ui/app/css/fonts/Cabin-Bold-webfont.ttf  |  Bin 58180 -> 0 bytes
 falcon-ui/app/css/fonts/Cabin-Bold-webfont.woff |  Bin 27180 -> 0 bytes
 .../app/css/fonts/Cabin-Bold-webfont.woff2      |  Bin 21264 -> 0 bytes
 .../app/css/fonts/Cabin-Italic-webfont.eot      |  Bin 27595 -> 0 bytes
 .../app/css/fonts/Cabin-Italic-webfont.svg      | 1924 -----------
 .../app/css/fonts/Cabin-Italic-webfont.ttf      |  Bin 61740 -> 0 bytes
 .../app/css/fonts/Cabin-Italic-webfont.woff     |  Bin 30784 -> 0 bytes
 .../app/css/fonts/Cabin-Italic-webfont.woff2    |  Bin 24024 -> 0 bytes
 .../app/css/fonts/Cabin-Regular-webfont.eot     |  Bin 25212 -> 0 bytes
 .../app/css/fonts/Cabin-Regular-webfont.svg     | 1991 -----------
 .../app/css/fonts/Cabin-Regular-webfont.ttf     |  Bin 59004 -> 0 bytes
 .../app/css/fonts/Cabin-Regular-webfont.woff    |  Bin 28160 -> 0 bytes
 .../app/css/fonts/Cabin-Regular-webfont.woff2   |  Bin 22092 -> 0 bytes
 falcon-ui/app/css/fonts/cabin.less              |   53 -
 falcon-ui/app/css/fonts/entypo.eot              |  Bin 35540 -> 0 bytes
 falcon-ui/app/css/fonts/entypo.less             | 1190 -------
 falcon-ui/app/css/fonts/entypo.svg              |   13 -
 falcon-ui/app/css/fonts/entypo.ttf              |  Bin 35392 -> 0 bytes
 falcon-ui/app/css/fonts/entypo.woff             |  Bin 21916 -> 0 bytes
 .../css/fonts/glyphicons-halflings-regular.eot  |  Bin 20127 -> 0 bytes
 .../css/fonts/glyphicons-halflings-regular.svg  |  288 --
 .../css/fonts/glyphicons-halflings-regular.ttf  |  Bin 45404 -> 0 bytes
 .../css/fonts/glyphicons-halflings-regular.woff |  Bin 23424 -> 0 bytes
 .../fonts/glyphicons-halflings-regular.woff2    |  Bin 18028 -> 0 bytes
 falcon-ui/app/css/img/ajax-loader.gif           |  Bin 18533 -> 0 bytes
 falcon-ui/app/css/img/ajax-loader_2.gif         |  Bin 1849 -> 0 bytes
 falcon-ui/app/css/img/falcon.png                |  Bin 4742 -> 0 bytes
 falcon-ui/app/css/img/user.svg                  |   15 -
 falcon-ui/app/css/main.css                      |    1 -
 falcon-ui/app/css/main.less                     |   86 -
 falcon-ui/app/css/styles/angular.notify.less    |   84 -
 falcon-ui/app/css/styles/animate.less           | 3181 -----------------
 falcon-ui/app/css/styles/autocomplete-tags.less |   70 -
 falcon-ui/app/css/styles/chart.less             |  159 -
 falcon-ui/app/css/styles/common.less            |  880 -----
 falcon-ui/app/css/styles/dataset-form.less      |  494 ---
 falcon-ui/app/css/styles/entities-list.less     |  212 --
 falcon-ui/app/css/styles/entity-summary.less    |   17 -
 falcon-ui/app/css/styles/form-pages.less        |  520 ---
 falcon-ui/app/css/styles/mixins.less            |   64 -
 falcon-ui/app/css/styles/nav-header.less        |  240 --
 falcon-ui/app/css/styles/popover.less           |  230 --
 falcon-ui/app/css/styles/progress-bar.less      |  313 --
 falcon-ui/app/css/styles/server-messages.less   |   81 -
 falcon-ui/app/css/variables.less                |  864 -----
 falcon-ui/app/html/authenticating.html          |   37 -
 .../html/cluster/clusterFormGeneralStepTpl.html |  215 --
 .../html/cluster/clusterFormSummaryStepTpl.html |   87 -
 falcon-ui/app/html/cluster/clusterFormTpl.html  |   83 -
 .../html/dataset/datasetFormGeneralStepTpl.html |  718 ----
 .../html/dataset/datasetFormSummaryStepTpl.html |  146 -
 falcon-ui/app/html/dataset/datasetFormTpl.html  |   57 -
 .../html/directives/autocomplete-template.html  |   31 -
 .../html/directives/dependenciesGraphDv.html    |   19 -
 .../app/html/directives/entitiesListDv.html     |  107 -
 .../html/directives/entitiesSearchListDv.html   |  128 -
 .../app/html/directives/entitySummaryDv.html    |   47 -
 .../app/html/directives/instancesListDv.html    |  148 -
 .../app/html/directives/lineageGraphDv.html     |   36 -
 falcon-ui/app/html/directives/navDv.html        |  100 -
 falcon-ui/app/html/directives/notify.html       |   35 -
 .../app/html/directives/serverMessagesDv.html   |   53 -
 falcon-ui/app/html/directives/tags-input.html   |   31 -
 .../app/html/directives/timeZoneSelectDv.html   |   54 -
 falcon-ui/app/html/entityDefinitionTpl.html     |   35 -
 falcon-ui/app/html/entityDetailsTpl.html        |   76 -
 falcon-ui/app/html/entitySummary.html           |  147 -
 .../app/html/feed/feedFormClustersStepTpl.html  |  196 --
 .../app/html/feed/feedFormGeneralStepTpl.html   |  124 -
 .../app/html/feed/feedFormLocationStepTpl.html  |  113 -
 .../html/feed/feedFormPropertiesStepTpl.html    |  161 -
 .../app/html/feed/feedFormSummaryStepTpl.html   |  163 -
 falcon-ui/app/html/feed/feedFormTpl.html        |  107 -
 falcon-ui/app/html/feed/feedSummary.html        |  214 --
 falcon-ui/app/html/formsTpl.html                |   23 -
 falcon-ui/app/html/instanceDetails.html         |   78 -
 falcon-ui/app/html/login.html                   |   83 -
 falcon-ui/app/html/mainTpl.html                 |   75 -
 .../process/processFormClustersStepTpl.html     |  133 -
 .../html/process/processFormGeneralStepTpl.html |  170 -
 .../processFormInputsAndOutputsStepTpl.html     |  163 -
 .../process/processFormPropertiesStepTpl.html   |  126 -
 .../html/process/processFormSummaryStepTpl.html |  173 -
 falcon-ui/app/html/process/processFormTpl.html  |  106 -
 falcon-ui/app/html/process/processSummary.html  |  184 -
 falcon-ui/app/index.html                        |   44 -
 falcon-ui/app/js/app.js                         |  341 --
 .../js/controllers/cluster/cluster-module.js    |  354 --
 falcon-ui/app/js/controllers/controllers.js     |   35 -
 .../app/js/controllers/dashboard-controller.js  |  220 --
 .../controllers/dataset/dataset-controller.js   |  642 ----
 .../js/controllers/entity/entity-definition.js  |   50 -
 .../app/js/controllers/entity/entity-details.js |  169 -
 .../app/js/controllers/entity/entity-module.js  |   48 -
 .../js/controllers/entity/instance-details.js   |  131 -
 .../feed/feed-clusters-controller.js            |  135 -
 .../feed/feed-general-information-controller.js |   48 -
 .../feed/feed-location-controller.js            |   57 -
 .../app/js/controllers/feed/feed-module.js      |   34 -
 .../feed/feed-properties-controller.js          |   59 -
 .../app/js/controllers/feed/feed-root-ctrl.js   |  248 --
 .../controllers/feed/feed-summary-controller.js |   53 -
 .../app/js/controllers/header-controller.js     |  116 -
 falcon-ui/app/js/controllers/login.js           |   72 -
 .../process/process-clusters-ctrl.js            |   75 -
 .../process/process-general-information-ctrl.js |   66 -
 .../process/process-inputs-and-outputs-ctrl.js  |   99 -
 .../js/controllers/process/process-module.js    |   33 -
 .../process/process-properties-controller.js    |   36 -
 .../js/controllers/process/process-root-ctrl.js |  139 -
 .../controllers/process/process-summary-ctrl.js |   95 -
 falcon-ui/app/js/controllers/root-controller.js |  140 -
 falcon-ui/app/js/directives/chart.js            |  531 ---
 falcon-ui/app/js/directives/check-name.js       |  165 -
 .../app/js/directives/dependencies-graph.js     |  294 --
 falcon-ui/app/js/directives/directives.js       |  182 -
 falcon-ui/app/js/directives/entities-list.js    |  198 --
 .../app/js/directives/entities-search-list.js   |  305 --
 .../js/directives/entity-summary-directive.js   |   65 -
 falcon-ui/app/js/directives/instances-list.js   |  832 -----
 falcon-ui/app/js/directives/lineage-graph.js    |  288 --
 falcon-ui/app/js/directives/ng-tags-input.js    | 1148 -------
 falcon-ui/app/js/directives/server-messages.js  |   36 -
 falcon-ui/app/js/directives/tooltip.js          |   37 -
 .../app/js/directives/validation-message.js     |  249 --
 falcon-ui/app/js/lib/angular-animate.min.js     |   32 -
 falcon-ui/app/js/lib/angular-cookies.min.js     |    7 -
 falcon-ui/app/js/lib/angular-messages.min.js    |    9 -
 falcon-ui/app/js/lib/angular-mocks.js           | 2380 -------------
 falcon-ui/app/js/lib/angular.min.js             |  249 --
 falcon-ui/app/js/lib/bootstrap.notify.js        |  347 --
 falcon-ui/app/js/lib/checklist-model.js         |   99 -
 falcon-ui/app/js/lib/d3.min.js                  |    5 -
 falcon-ui/app/js/lib/dagre.min.js               |    2 -
 falcon-ui/app/js/lib/dust-full-2.0.0.min.js     | 3209 ------------------
 falcon-ui/app/js/lib/jquery-1.11.1.min.js       |    4 -
 falcon-ui/app/js/lib/ng-mask.min.js             |    1 -
 falcon-ui/app/js/lib/popover.js                 |  463 ---
 .../app/js/lib/ui-bootstrap-tpls-0.11.0.min.js  |   10 -
 falcon-ui/app/js/lib/uirouter.min.js            |    8 -
 falcon-ui/app/js/lib/xml2json.min.js            |  578 ----
 falcon-ui/app/js/services/common/date-helper.js |   81 -
 falcon-ui/app/js/services/common/falcon-api.js  |  411 ---
 falcon-ui/app/js/services/common/file-api.js    |   62 -
 .../app/js/services/common/json-transformer.js  |  109 -
 falcon-ui/app/js/services/common/server-api.js  |   88 -
 .../js/services/common/validation-service.js    |  197 --
 .../js/services/common/xml-to-json-service.js   |   80 -
 .../app/js/services/entity/entity-factory.js    |  274 --
 .../app/js/services/entity/entity-messages.js   |   57 -
 .../app/js/services/entity/entity-model.js      |  464 ---
 .../app/js/services/entity/entity-serializer.js |  520 ---
 .../app/js/services/entity/search-entity.js     |   63 -
 .../app/js/services/entity/search-instance.js   |   50 -
 falcon-ui/app/js/services/services.js           |   48 -
 .../test/controllers/HeaderControllerSpec.js    |  119 -
 .../app/test/controllers/MainControllerSpec.js  |  203 --
 .../controllers/cluster/cluster-moduleSpec.js   |  336 --
 .../entity/EntityRootControllerSpec.js          |   90 -
 .../FeedGeneralInformationControllerSpec.js     |   83 -
 .../feed/FeedLocationControllerSpec.js          |   62 -
 .../feed/FeedPropertiesControllerSpec.js        |   81 -
 .../test/controllers/feed/FeedRootCtrlSpec.js   |  199 --
 .../feed/FeedSummaryControllerSpec.js           |   68 -
 .../process/ProcessClustersCtrlSpec.js          |  107 -
 .../ProcessGeneralInformationCtrlSpec.js        |  111 -
 .../process/ProcessInputsAndOutputsCtrlSpec.js  |   52 -
 .../process/ProcessPropertiesControllerSpec.js  |   44 -
 .../controllers/process/ProcessRootCtrlSpec.js  |  136 -
 .../process/ProcessSummaryCtrlSpec.js           |   88 -
 falcon-ui/app/test/directives/DirectivesSpec.js |  134 -
 .../directives/EntitySummaryDirectiveSpec.js    |   88 -
 falcon-ui/app/test/e2e/ClusterE2E.js            |  153 -
 falcon-ui/app/test/e2e/FeedE2E.js               |  178 -
 falcon-ui/app/test/e2e/LoginE2E.js              |   55 -
 falcon-ui/app/test/e2e/ProcessE2E.js            |   95 -
 falcon-ui/app/test/e2e/protractor.js            |    7 -
 falcon-ui/app/test/lib/jasmine-2.0.2/boot.js    |  120 -
 falcon-ui/app/test/lib/jasmine-2.0.2/console.js |  166 -
 .../app/test/lib/jasmine-2.0.2/jasmine-html.js  |  390 ---
 .../app/test/lib/jasmine-2.0.2/jasmine.css      |   61 -
 falcon-ui/app/test/lib/jasmine-2.0.2/jasmine.js | 2593 --------------
 .../test/lib/jasmine-2.0.2/jasmine_favicon.png  |  Bin 1486 -> 0 bytes
 .../app/test/services/EntityFactorySpec.js      |  107 -
 falcon-ui/app/test/services/EntityModelSpec.js  |   55 -
 .../app/test/services/EntitySerializerSpec.js   | 1285 -------
 .../app/test/services/FalconServiceSpec.js      |   77 -
 .../app/test/services/JsonTransformerSpec.js    |  144 -
 .../app/test/services/ValdationServiceSpec.js   |   70 -
 falcon-ui/app/test/services/X2jsServiceSpec.js  |  128 -
 falcon-ui/bower.json                            |   18 -
 falcon-ui/express-data/chartData.js             |  215 --
 falcon-ui/express-data/datsetData.js            |   69 -
 falcon-ui/express-data/mockData.js              |  614 ----
 falcon-ui/karma.conf.js                         |   81 -
 falcon-ui/package.json                          |   40 -
 falcon-ui/pom.xml                               |  111 -
 falcon-ui/server.js                             |  526 ---
 falcon_merge_pr.py                              |  503 ---
 hadoop-dependencies/pom.xml                     |  192 --
 .../falcon/hadoop/FileSystemExtension.java      |   28 -
 .../apache/falcon/hadoop/JailedFileSystem.java  |  216 --
 ...op.mapreduce.protocol.ClientProtocolProvider |   14 -
 .../mapred/ClassicClientProtocolProvider.java   |   73 -
 html5-ui/css/bootstrap.min.css                  |    9 -
 html5-ui/css/falcon.css                         |  241 --
 html5-ui/entity.html                            |  169 -
 html5-ui/fonts/glyphicons-halflings-regular.eot |  Bin 20290 -> 0 bytes
 html5-ui/fonts/glyphicons-halflings-regular.svg |  229 --
 html5-ui/fonts/glyphicons-halflings-regular.ttf |  Bin 41236 -> 0 bytes
 .../fonts/glyphicons-halflings-regular.woff     |  Bin 23292 -> 0 bytes
 html5-ui/img/falcon-114.png                     |  Bin 17181 -> 0 bytes
 html5-ui/img/falcon-144.png                     |  Bin 23857 -> 0 bytes
 html5-ui/img/falcon-57.png                      |  Bin 8036 -> 0 bytes
 html5-ui/img/falcon-64.png                      |  Bin 8020 -> 0 bytes
 html5-ui/img/falcon-72.png                      |  Bin 9276 -> 0 bytes
 html5-ui/img/falcon.png                         |  Bin 12349 -> 0 bytes
 html5-ui/img/favicon.png                        |  Bin 8020 -> 0 bytes
 html5-ui/index.html                             |   80 -
 html5-ui/js/bootstrap.min.js                    |    9 -
 html5-ui/js/d3.v3.min.js                        |    5 -
 html5-ui/js/dagre.min.js                        |    2 -
 html5-ui/js/dust-full-2.0.0.min.js              |    1 -
 html5-ui/js/dust-helpers-1.2.0.min.js           |    1 -
 html5-ui/js/falcon-entity.js                    |  266 --
 html5-ui/js/falcon-index.js                     |  107 -
 html5-ui/js/falcon-lineage.js                   |  240 --
 html5-ui/js/falcon.js                           |  166 -
 html5-ui/js/jquery-1.11.0.min.js                |    4 -
 lifecycle/pom.xml                               |  208 --
 .../engine/oozie/OoziePolicyBuilderFactory.java |   59 -
 .../retention/AgeBasedCoordinatorBuilder.java   |  122 -
 .../oozie/retention/AgeBasedDeleteBuilder.java  |   56 -
 .../retention/AgeBasedWorkflowBuilder.java      |  158 -
 .../engine/oozie/utils/OozieBuilderUtils.java   |  558 ---
 .../resources/action/feed/eviction-action.xml   |   59 -
 .../src/main/resources/binding/jaxb-binding.xjb |   26 -
 .../lifecycle/retention/AgeBasedDeleteTest.java |  108 -
 messaging/pom.xml                               |  187 -
 .../falcon/messaging/JMSMessageConsumer.java    |  313 --
 .../falcon/messaging/JMSMessageProducer.java    |  301 --
 .../falcon/messaging/util/MessagingUtil.java    |   94 -
 .../src/main/resources/jms-config.properties    |   30 -
 messaging/src/main/resources/log4j.xml          |   63 -
 .../falcon/messaging/FeedProducerTest.java      |  214 --
 .../messaging/JMSMessageConsumerTest.java       |  291 --
 .../messaging/JMSMessageProducerTest.java       |  245 --
 .../falcon/messaging/ProcessProducerTest.java   |  165 -
 metrics/pom.xml                                 |   96 -
 .../falcon/aspect/AbstractFalconAspect.java     |  141 -
 .../org/apache/falcon/aspect/AlertMessage.java  |   56 -
 .../org/apache/falcon/aspect/AuditMessage.java  |   74 -
 .../org/apache/falcon/aspect/GenericAlert.java  |  131 -
 .../apache/falcon/aspect/ResourceMessage.java   |   71 -
 .../falcon/job/FSReplicationCounters.java       |   44 -
 .../falcon/job/HiveReplicationCounters.java     |   62 -
 .../java/org/apache/falcon/job/JobCounters.java |   92 -
 .../apache/falcon/job/JobCountersHandler.java   |   43 -
 .../java/org/apache/falcon/job/JobType.java     |   27 -
 .../falcon/job/ReplicationJobCountersList.java  |   61 -
 .../java/org/apache/falcon/monitors/Alert.java  |   37 -
 .../org/apache/falcon/monitors/Auditable.java   |   37 -
 .../org/apache/falcon/monitors/Dimension.java   |   37 -
 .../org/apache/falcon/monitors/Monitored.java   |   37 -
 .../org/apache/falcon/monitors/TimeTaken.java   |   36 -
 .../apache/falcon/plugin/AlertingPlugin.java    |   30 -
 .../apache/falcon/plugin/AuditingPlugin.java    |   30 -
 .../falcon/plugin/DefaultMonitoringPlugin.java  |   50 -
 .../apache/falcon/plugin/MonitoringPlugin.java  |   29 -
 .../falcon/plugin/NotificationPlugin.java       |   29 -
 .../falcon/util/EmailNotificationProps.java     |   58 -
 .../apache/falcon/util/NotificationType.java    |   46 -
 .../falcon/util/ResourcesReflectionUtil.java    |  147 -
 .../apache/falcon/aspect/AlertMessageTest.java  |   53 -
 .../apache/falcon/aspect/AuditMessageTest.java  |   57 -
 .../falcon/job/FSReplicationCountersTest.java   |   52 -
 monitoring/check_falcon_job.pl                  |  221 --
 monitoring/falcon_job_status.pl                 |  147 -
 monitoring/get_oozie_stats.pl                   |  160 -
 oozie-el-extensions/pom.xml                     |  107 -
 .../src/main/conf/oozie-site.xml                |  256 --
 .../oozie/extensions/OozieELExtensions.java     |  471 ---
 .../oozie/extensions/TestOozieELExtensions.java |  312 --
 .../src/test/resources/oozie-site.xml           |  399 ---
 oozie/pom.xml                                   |  219 --
 .../falcon/logging/DefaultTaskLogRetriever.java |   67 -
 .../org/apache/falcon/logging/JobLogMover.java  |  200 --
 .../org/apache/falcon/logging/LogProvider.java  |  173 -
 .../falcon/logging/TaskLogRetrieverYarn.java    |   86 -
 .../falcon/logging/TaskLogURLRetriever.java     |   32 -
 .../oozie/DatabaseExportWorkflowBuilder.java    |  172 -
 .../oozie/DatabaseImportWorkflowBuilder.java    |  161 -
 .../falcon/oozie/ExportWorkflowBuilder.java     |   85 -
 .../oozie/FeedExportCoordinatorBuilder.java     |  193 --
 .../oozie/FeedImportCoordinatorBuilder.java     |  192 --
 .../apache/falcon/oozie/ImportExportCommon.java |   73 -
 .../falcon/oozie/ImportWorkflowBuilder.java     |   85 -
 .../apache/falcon/oozie/OozieBundleBuilder.java |  173 -
 .../falcon/oozie/OozieCoordinatorBuilder.java   |  146 -
 .../apache/falcon/oozie/OozieEntityBuilder.java |  285 --
 .../OozieOrchestrationWorkflowBuilder.java      |  500 ---
 .../feed/FSReplicationWorkflowBuilder.java      |   89 -
 .../falcon/oozie/feed/FeedBundleBuilder.java    |   94 -
 .../feed/FeedReplicationCoordinatorBuilder.java |  437 ---
 .../feed/FeedReplicationWorkflowBuilder.java    |  141 -
 .../feed/FeedRetentionCoordinatorBuilder.java   |  106 -
 .../feed/FeedRetentionWorkflowBuilder.java      |  151 -
 .../feed/HCatReplicationWorkflowBuilder.java    |  175 -
 .../process/HiveProcessWorkflowBuilder.java     |  102 -
 .../process/OozieProcessWorkflowBuilder.java    |   42 -
 .../process/PigProcessWorkflowBuilder.java      |   87 -
 .../oozie/process/ProcessBundleBuilder.java     |  116 -
 .../ProcessExecutionCoordinatorBuilder.java     |  360 --
 .../ProcessExecutionWorkflowBuilder.java        |  288 --
 .../apache/falcon/service/FalconPathFilter.java |   30 -
 .../service/SharedLibraryHostingService.java    |  207 --
 .../java/org/apache/falcon/util/OozieUtils.java |  100 -
 .../falcon/workflow/FalconPostProcessing.java   |  104 -
 .../apache/falcon/workflow/LateDataHandler.java |  299 --
 .../falcon/workflow/engine/NullBundleJob.java   |  132 -
 .../workflow/engine/OozieClientFactory.java     |   79 -
 .../engine/OozieHouseKeepingService.java        |   86 -
 .../workflow/engine/OozieWorkflowEngine.java    | 1798 ----------
 .../oozie/client/LocalOozieClientBundle.java    |  382 ---
 .../client/LocalOozieClientCoordProxy.java      |   99 -
 .../oozie/client/LocalProxyOozieClient.java     |  228 --
 .../apache/oozie/client/ProxyOozieClient.java   |  607 ----
 oozie/src/main/resources/.DS_Store              |  Bin 6148 -> 0 bytes
 .../resources/action/feed/eviction-action.xml   |   59 -
 .../feed/export-sqoop-database-action.xml       |   47 -
 .../action/feed/falcon-table-export.hql         |   18 -
 .../action/feed/falcon-table-import.hql         |   20 -
 .../feed/import-sqoop-database-action.xml       |   47 -
 .../action/feed/replication-action.xml          |   64 -
 .../resources/action/feed/table-cleanup.xml     |   25 -
 .../main/resources/action/feed/table-export.xml |   48 -
 .../main/resources/action/feed/table-import.xml |   42 -
 .../src/main/resources/action/post-process.xml  |   98 -
 oozie/src/main/resources/action/pre-process.xml |   54 -
 .../resources/action/process/hive-action.xml    |   37 -
 .../resources/action/process/oozie-action.xml   |   25 -
 .../resources/action/process/pig-action.xml     |   40 -
 .../src/main/resources/binding/jaxb-binding.xjb |   27 -
 .../coordinator/replication-coordinator.xml     |   51 -
 .../oozie/bundle/BundleUnmarshallingTest.java   |   51 -
 .../CoordinatorUnmarshallingTest.java           |   50 -
 .../feed/OozieFeedWorkflowBuilderTest.java      |  930 -----
 .../oozie/logging/TaskLogRetrieverYarnTest.java |  131 -
 .../falcon/oozie/process/AbstractTestBase.java  |  290 --
 .../OozieProcessWorkflowBuilderTest.java        |  788 -----
 .../workflow/FalconPostProcessingTest.java      |  291 --
 .../workflow/WorkflowUnmarshallingTest.java     |   52 -
 .../resources/config/cluster/cluster-0.1.xml    |   44 -
 .../src/test/resources/config/feed/feed-0.1.xml |   63 -
 .../config/feed/hive-table-feed-out.xml         |   43 -
 .../resources/config/feed/hive-table-feed.xml   |   43 -
 .../test/resources/config/late/late-cluster.xml |   43 -
 .../test/resources/config/late/late-feed1.xml   |   53 -
 .../test/resources/config/late/late-feed2.xml   |   53 -
 .../test/resources/config/late/late-feed3.xml   |   53 -
 .../resources/config/late/late-process1.xml     |   41 -
 .../resources/config/late/late-process2.xml     |   57 -
 .../config/process/dumb-hive-process.xml        |   39 -
 .../resources/config/process/dumb-process.xml   |   40 -
 .../config/process/hive-process-FSInputFeed.xml |   46 -
 .../process/hive-process-FSOutputFeed.xml       |   46 -
 .../resources/config/process/hive-process.xml   |   46 -
 .../config/process/pig-process-0.1.xml          |   53 -
 .../config/process/pig-process-table.xml        |   46 -
 .../resources/config/process/process-0.1.xml    |   45 -
 .../config/process/process-no-inputs.xml        |   43 -
 .../config/process/process-no-outputs.xml       |   43 -
 oozie/src/test/resources/feed/feed.xml          |   57 -
 .../feed/fs-local-retention-lifecycle-feed.xml  |   61 -
 .../feed/fs-replication-feed-counters.xml       |   59 -
 .../test/resources/feed/fs-replication-feed.xml |   68 -
 .../test/resources/feed/fs-retention-feed.xml   |   50 -
 .../feed/fs-retention-lifecycle-feed.xml        |   60 -
 oozie/src/test/resources/feed/src-cluster.xml   |   40 -
 .../resources/feed/table-replication-feed.xml   |   42 -
 .../test/resources/feed/trg-cluster-alpha.xml   |   39 -
 .../test/resources/feed/trg-cluster-beta.xml    |   39 -
 oozie/src/test/resources/feed/trg-cluster.xml   |   40 -
 oozie/src/test/resources/oozie/xmls/bundle.xml  |   32 -
 .../test/resources/oozie/xmls/coordinator.xml   |   58 -
 .../src/test/resources/oozie/xmls/workflow.xml  |   38 -
 pom.xml                                         | 1437 --------
 prism/enunciate.xml                             |   38 -
 prism/pom.xml                                   |  288 --
 .../java/org/apache/falcon/FalconServer.java    |  141 -
 .../org/apache/falcon/FalconWebException.java   |   79 -
 .../falcon/handler/SLAMonitoringHandler.java    |   87 -
 .../falcon/listener/ContextStartupListener.java |   96 -
 .../plugin/ChainableMonitoringPlugin.java       |  147 -
 .../apache/falcon/plugin/EmailNotification.java |  167 -
 .../falcon/plugin/EmailNotificationPlugin.java  |   74 -
 .../falcon/plugin/NotificationHandler.java      |   39 -
 .../falcon/resource/AbstractEntityManager.java  | 1177 -------
 .../resource/AbstractInstanceManager.java       |  975 ------
 .../AbstractSchedulableEntityManager.java       |  417 ---
 .../falcon/resource/admin/AdminResource.java    |  210 --
 .../resource/channel/AbstractChannel.java       |   51 -
 .../apache/falcon/resource/channel/Channel.java |   31 -
 .../falcon/resource/channel/ChannelFactory.java |   62 -
 .../falcon/resource/channel/HTTPChannel.java    |  231 --
 .../falcon/resource/channel/IPCChannel.java     |   67 -
 .../falcon/resource/channel/MethodKey.java      |   85 -
 .../resource/channel/NullServletRequest.java    |  315 --
 .../resource/channel/SecureHTTPChannel.java     |   59 -
 .../metadata/AbstractMetadataResource.java      |   70 -
 .../metadata/LineageMetadataResource.java       |  633 ----
 .../metadata/MetadataDiscoveryResource.java     |  354 --
 .../resource/provider/JAXBContextResolver.java  |   75 -
 .../falcon/resource/proxy/BufferedRequest.java  |  358 --
 .../resource/proxy/InstanceManagerProxy.java    |  653 ----
 .../proxy/SchedulableEntityManagerProxy.java    |  797 -----
 .../security/ClientCertificateFilter.java       |   98 -
 .../falcon/security/FalconAuditFilter.java      |  105 -
 .../security/FalconAuthenticationFilter.java    |  208 --
 .../security/FalconAuthorizationFilter.java     |  221 --
 .../apache/falcon/security/HostnameFilter.java  |  105 -
 ...eUserInHeaderBasedAuthenticationHandler.java |   49 -
 .../service/FeedSLAMonitoringService.java       |  569 ----
 .../service/ProcessSubscriberService.java       |   78 -
 .../org/apache/falcon/util/EmbeddedServer.java  |   72 -
 .../apache/falcon/util/NotificationUtil.java    |   77 -
 .../falcon/util/SecureEmbeddedServer.java       |   53 -
 .../java/org/apache/falcon/util/Servlets.java   |   90 -
 prism/src/main/resources/deploy.properties      |   29 -
 .../main/resources/falcon-buildinfo.properties  |   28 -
 prism/src/main/resources/log4j.xml              |   77 -
 prism/src/main/webapp/WEB-INF/web.xml           |   93 -
 .../apache/falcon/aspect/GenericAlertTest.java  |   33 -
 .../apache/falcon/aspect/LoggingAspectTest.java |   81 -
 .../plugin/ChainableMonitoringPluginTest.java   |   74 -
 .../falcon/plugin/EmailNotificationTest.java    |  160 -
 .../falcon/resource/EntityManagerTest.java      |  433 ---
 .../falcon/resource/InstanceManagerTest.java    |   33 -
 .../resource/admin/AdminResourceTest.java       |   67 -
 .../resource/admin/MockHttpServletResponse.java |  190 --
 .../metadata/LineageMetadataResourceTest.java   |  431 ---
 .../metadata/MetadataDiscoveryResourceTest.java |  320 --
 .../resource/metadata/MetadataTestContext.java  |  273 --
 .../security/ClientCertificateFilterTest.java   |  134 -
 .../falcon/security/FalconAuditFilterTest.java  |  145 -
 .../FalconAuthenticationFilterTest.java         |  299 --
 .../security/FalconAuthorizationFilterTest.java |  273 --
 .../falcon/security/HostnameFilterTest.java     |   93 -
 .../falcon/service/FeedSLAMonitoringTest.java   |  196 --
 replication/pom.xml                             |   78 -
 .../falcon/replication/CustomReplicator.java    |   65 -
 .../falcon/replication/FeedReplicator.java      |  308 --
 .../falcon/replication/FilteredCopyListing.java |  177 -
 .../falcon/replication/FeedReplicatorTest.java  |  123 -
 .../replication/FilteredCopyListingTest.java    |  233 --
 rerun/pom.xml                                   |  159 -
 .../apache/falcon/rerun/event/LaterunEvent.java |   43 -
 .../apache/falcon/rerun/event/RerunEvent.java   |  133 -
 .../falcon/rerun/event/RerunEventFactory.java   |   70 -
 .../apache/falcon/rerun/event/RetryEvent.java   |   62 -
 .../rerun/handler/AbstractRerunConsumer.java    |  100 -
 .../rerun/handler/AbstractRerunHandler.java     |   87 -
 .../falcon/rerun/handler/LateRerunConsumer.java |  153 -
 .../falcon/rerun/handler/LateRerunHandler.java  |  263 --
 .../rerun/handler/RerunHandlerFactory.java      |   50 -
 .../falcon/rerun/handler/RetryConsumer.java     |   97 -
 .../falcon/rerun/handler/RetryHandler.java      |  143 -
 .../rerun/policy/AbstractRerunPolicy.java       |   44 -
 .../falcon/rerun/policy/ExpBackoffPolicy.java   |   58 -
 .../apache/falcon/rerun/policy/FinalPolicy.java |   40 -
 .../falcon/rerun/policy/PeriodicPolicy.java     |   29 -
 .../falcon/rerun/policy/RerunPolicyFactory.java |   46 -
 .../apache/falcon/rerun/queue/ActiveMQueue.java |  139 -
 .../apache/falcon/rerun/queue/DelayedQueue.java |   45 -
 .../falcon/rerun/queue/InMemoryQueue.java       |  161 -
 .../falcon/rerun/service/LateRunService.java    |   78 -
 .../falcon/rerun/service/RetryService.java      |   79 -
 .../falcon/rerun/AbstractRerunPolicyTest.java   |   93 -
 .../falcon/rerun/handler/TestLateData.java      |  196 --
 .../rerun/handler/TestLateRerunHandler.java     |   63 -
 .../apache/falcon/rerun/queue/ActiveMQTest.java |   67 -
 .../falcon/rerun/queue/InMemoryQueueTest.java   |   89 -
 rerun/src/test/resources/cluster-template.xml   |   37 -
 rerun/src/test/resources/feed-template.xml      |   44 -
 rerun/src/test/resources/process-template.xml   |   49 -
 rerun/src/test/resources/process-template2.xml  |   52 -
 retention/pom.xml                               |   82 -
 .../apache/falcon/retention/FeedEvictor.java    |  125 -
 .../falcon/retention/FeedEvictorTest.java       |  576 ----
 scheduler/pom.xml                               |  244 --
 .../falcon/exception/DAGEngineException.java    |   48 -
 .../InvalidStateTransitionException.java        |   47 -
 .../exception/NotificationServiceException.java |   48 -
 .../falcon/exception/StateStoreException.java   |   47 -
 .../apache/falcon/execution/EntityExecutor.java |  131 -
 .../falcon/execution/ExecutionInstance.java     |  221 --
 .../execution/FalconExecutionService.java       |  259 --
 .../falcon/execution/NotificationHandler.java   |   59 -
 .../execution/ProcessExecutionInstance.java     |  336 --
 .../falcon/execution/ProcessExecutor.java       |  550 ---
 .../apache/falcon/execution/SchedulerUtil.java  |   54 -
 .../service/FalconNotificationService.java      |   76 -
 .../service/NotificationServicesRegistry.java   |  125 -
 .../notification/service/event/DataEvent.java   |   70 -
 .../notification/service/event/Event.java       |   38 -
 .../notification/service/event/EventType.java   |   30 -
 .../service/event/JobCompletedEvent.java        |   53 -
 .../service/event/JobScheduledEvent.java        |   75 -
 .../notification/service/event/RerunEvent.java  |   45 -
 .../service/event/TimeElapsedEvent.java         |   57 -
 .../notification/service/impl/AlarmService.java |  326 --
 .../service/impl/DataAvailabilityService.java   |  272 --
 .../service/impl/JobCompletionService.java      |  228 --
 .../service/impl/SchedulerService.java          |  441 ---
 .../service/request/AlarmRequest.java           |   84 -
 .../request/DataNotificationRequest.java        |  165 -
 .../JobCompletionNotificationRequest.java       |   62 -
 .../request/JobScheduleNotificationRequest.java |   60 -
 .../service/request/NotificationRequest.java    |   53 -
 .../org/apache/falcon/predicate/Predicate.java  |  283 --
 .../apache/falcon/state/EntityClusterID.java    |   51 -
 .../java/org/apache/falcon/state/EntityID.java  |   51 -
 .../org/apache/falcon/state/EntityState.java    |  181 -
 .../falcon/state/EntityStateChangeHandler.java  |   66 -
 .../main/java/org/apache/falcon/state/ID.java   |   73 -
 .../org/apache/falcon/state/InstanceID.java     |  102 -
 .../org/apache/falcon/state/InstanceState.java  |  307 --
 .../state/InstanceStateChangeHandler.java       |  107 -
 .../org/apache/falcon/state/StateMachine.java   |   34 -
 .../org/apache/falcon/state/StateService.java   |  198 --
 .../falcon/state/store/AbstractStateStore.java  |   96 -
 .../falcon/state/store/EntityStateStore.java    |   91 -
 .../falcon/state/store/InMemoryStateStore.java  |  289 --
 .../falcon/state/store/InstanceStateStore.java  |  151 -
 .../apache/falcon/state/store/StateStore.java   |   31 -
 .../falcon/state/store/jdbc/BeanMapperUtil.java |  324 --
 .../falcon/state/store/jdbc/EntityBean.java     |  117 -
 .../falcon/state/store/jdbc/InstanceBean.java   |  229 --
 .../falcon/state/store/jdbc/JDBCStateStore.java |  481 ---
 .../state/store/service/FalconJPAService.java   |  171 -
 .../falcon/tools/FalconStateStoreDBCLI.java     |  436 ---
 .../falcon/workflow/engine/DAGEngine.java       |  125 -
 .../workflow/engine/DAGEngineFactory.java       |   60 -
 .../workflow/engine/FalconWorkflowEngine.java   |  557 ---
 .../falcon/workflow/engine/OozieDAGEngine.java  |  468 ---
 .../src/main/resources/META-INF/persistence.xml |  104 -
 .../main/resources/falcon-buildinfo.properties  |   28 -
 .../execution/FalconExecutionServiceTest.java   |  634 ----
 .../apache/falcon/execution/MockDAGEngine.java  |  128 -
 .../falcon/execution/SchedulerUtilTest.java     |   50 -
 .../notification/service/AlarmServiceTest.java  |   77 -
 .../service/DataAvailabilityServiceTest.java    |  135 -
 .../service/SchedulerServiceTest.java           |  333 --
 .../apache/falcon/predicate/PredicateTest.java  |   53 -
 .../falcon/state/AbstractSchedulerTestBase.java |   70 -
 .../falcon/state/EntityStateServiceTest.java    |  136 -
 .../falcon/state/InstanceStateServiceTest.java  |  156 -
 .../state/service/TestFalconJPAService.java     |   64 -
 .../state/service/store/TestJDBCStateStore.java |  517 ---
 .../falcon/tools/TestFalconStateStoreDBCLI.java |   89 -
 .../engine/WorkflowEngineFactoryTest.java       |  123 -
 .../resources/config/cluster/cluster-0.1.xml    |   43 -
 .../src/test/resources/config/feed/feed-0.1.xml |   57 -
 .../resources/config/process/process-0.1.xml    |   54 -
 scheduler/src/test/resources/startup.properties |  134 -
 .../src/test/resources/statestore.credentials   |   20 -
 .../src/test/resources/statestore.properties    |   36 -
 site/0.3-incubating/dependency-convergence.html |    6 +-
 site/0.3-incubating/dependency-info.html        |    4 +-
 .../0.3-incubating/distribution-management.html |    4 +-
 site/0.3-incubating/integration.html            |    4 +-
 site/0.3-incubating/issue-tracking.html         |    4 +-
 site/0.3-incubating/license.html                |    4 +-
 site/0.3-incubating/mail-lists.html             |    4 +-
 site/0.3-incubating/plugin-management.html      |    4 +-
 site/0.3-incubating/plugins.html                |    4 +-
 site/0.3-incubating/project-info.html           |    4 +-
 site/0.3-incubating/project-summary.html        |    4 +-
 site/0.3-incubating/source-repository.html      |    4 +-
 site/0.3-incubating/team-list.html              |   18 +-
 site/0.4-incubating/dependency-convergence.html |    6 +-
 site/0.4-incubating/dependency-info.html        |    4 +-
 .../0.4-incubating/distribution-management.html |    4 +-
 site/0.4-incubating/integration.html            |    4 +-
 site/0.4-incubating/issue-tracking.html         |    4 +-
 site/0.4-incubating/license.html                |    4 +-
 site/0.4-incubating/mail-lists.html             |    4 +-
 site/0.4-incubating/plugin-management.html      |    4 +-
 site/0.4-incubating/plugins.html                |    4 +-
 site/0.4-incubating/project-info.html           |    4 +-
 site/0.4-incubating/project-summary.html        |    4 +-
 site/0.4-incubating/source-repository.html      |    4 +-
 site/0.4-incubating/team-list.html              |   18 +-
 site/0.5-incubating/dependency-convergence.html |    6 +-
 site/0.5-incubating/dependency-info.html        |    4 +-
 .../0.5-incubating/distribution-management.html |    4 +-
 site/0.5-incubating/integration.html            |    4 +-
 site/0.5-incubating/issue-tracking.html         |    4 +-
 site/0.5-incubating/license.html                |    4 +-
 site/0.5-incubating/mail-lists.html             |    4 +-
 site/0.5-incubating/plugin-management.html      |    4 +-
 site/0.5-incubating/plugins.html                |    4 +-
 site/0.5-incubating/project-info.html           |    4 +-
 site/0.5-incubating/project-summary.html        |    4 +-
 site/0.5-incubating/source-repository.html      |    4 +-
 site/0.5-incubating/team-list.html              |   18 +-
 site/0.6-incubating/dependency-convergence.html |    6 +-
 site/0.6-incubating/dependency-info.html        |    4 +-
 .../0.6-incubating/distribution-management.html |    4 +-
 site/0.6-incubating/integration.html            |    4 +-
 site/0.6-incubating/issue-tracking.html         |    4 +-
 site/0.6-incubating/license.html                |    4 +-
 site/0.6-incubating/mail-lists.html             |    4 +-
 site/0.6-incubating/plugin-management.html      |    4 +-
 site/0.6-incubating/plugins.html                |    4 +-
 site/0.6-incubating/project-info.html           |    4 +-
 site/0.6-incubating/project-summary.html        |    4 +-
 site/0.6-incubating/source-repository.html      |    4 +-
 site/0.6-incubating/team-list.html              |   18 +-
 site/0.6.1/dependency-convergence.html          |    8 +-
 site/0.6.1/dependency-info.html                 |    6 +-
 site/0.6.1/distribution-management.html         |    6 +-
 site/0.6.1/integration.html                     |    6 +-
 site/0.6.1/issue-tracking.html                  |    6 +-
 site/0.6.1/license.html                         |    6 +-
 site/0.6.1/mail-lists.html                      |    6 +-
 site/0.6.1/plugin-management.html               |    6 +-
 site/0.6.1/plugins.html                         |    6 +-
 site/0.6.1/project-info.html                    |    6 +-
 site/0.6.1/project-summary.html                 |    6 +-
 site/0.6.1/source-repository.html               |    6 +-
 site/0.6.1/team-list.html                       |   20 +-
 site/0.7/dependency-convergence.html            |    8 +-
 site/0.7/dependency-info.html                   |    6 +-
 site/0.7/distribution-management.html           |    6 +-
 site/0.7/integration.html                       |    6 +-
 site/0.7/issue-tracking.html                    |    6 +-
 site/0.7/license.html                           |    6 +-
 site/0.7/mail-lists.html                        |    6 +-
 site/0.7/plugin-management.html                 |    6 +-
 site/0.7/plugins.html                           |    6 +-
 site/0.7/project-info.html                      |    6 +-
 site/0.7/project-summary.html                   |    6 +-
 site/0.7/source-repository.html                 |    6 +-
 site/0.7/team-list.html                         |   20 +-
 site/0.8/dependency-convergence.html            |    8 +-
 site/0.8/dependency-info.html                   |    6 +-
 site/0.8/distribution-management.html           |    6 +-
 site/0.8/integration.html                       |    6 +-
 site/0.8/issue-tracking.html                    |    6 +-
 site/0.8/license.html                           |    6 +-
 site/0.8/mail-lists.html                        |    6 +-
 site/0.8/plugin-management.html                 |    6 +-
 site/0.8/plugins.html                           |    6 +-
 site/0.8/project-info.html                      |    6 +-
 site/0.8/project-summary.html                   |    6 +-
 site/0.8/source-repository.html                 |    6 +-
 site/0.8/team-list.html                         |   20 +-
 site/0.9/dependency-convergence.html            |    8 +-
 site/0.9/dependency-info.html                   |    6 +-
 site/0.9/distribution-management.html           |    6 +-
 site/0.9/integration.html                       |    6 +-
 site/0.9/issue-tracking.html                    |    6 +-
 site/0.9/license.html                           |    6 +-
 site/0.9/mail-lists.html                        |    6 +-
 site/0.9/plugin-management.html                 |    6 +-
 site/0.9/plugins.html                           |    6 +-
 site/0.9/project-info.html                      |    6 +-
 site/0.9/project-summary.html                   |    6 +-
 site/0.9/source-repository.html                 |    6 +-
 site/0.9/team-list.html                         |   20 +-
 site/issue-tracking.html                        |    6 +-
 site/license.html                               |    6 +-
 site/mail-lists.html                            |    6 +-
 site/project-info.html                          |    6 +-
 site/source-repository.html                     |    6 +-
 site/team-list.html                             |    6 +-
 src/bin/falcon                                  |   39 -
 src/bin/falcon-config.sh                        |  127 -
 src/bin/falcon-db.sh                            |   49 -
 src/bin/falcon-start                            |   32 -
 src/bin/falcon-status                           |   32 -
 src/bin/falcon-stop                             |   32 -
 src/bin/package.sh                              |   70 -
 src/bin/prism-start                             |   37 -
 src/bin/prism-status                            |   32 -
 src/bin/prism-stop                              |   32 -
 src/bin/service-start.sh                        |   59 -
 src/bin/service-status.sh                       |   48 -
 src/bin/service-stop.sh                         |   56 -
 src/build/checkstyle-java-header.txt            |   17 -
 src/build/checkstyle-noframes.xsl               |  218 --
 src/build/checkstyle-suppressions.xml           |   26 -
 src/build/checkstyle.xml                        |  238 --
 src/build/findbugs-exclude.xml                  |   53 -
 src/conf/client.properties                      |   25 -
 src/conf/falcon-env.sh                          |   55 -
 src/conf/log4j.xml                              |  126 -
 src/conf/prism-client.properties                |   24 -
 src/conf/runtime.properties                     |   77 -
 src/conf/startup.properties                     |  304 --
 src/conf/statestore.credentials                 |   22 -
 src/conf/statestore.properties                  |   48 -
 src/main/assemblies/distributed-package.xml     |  186 -
 src/main/assemblies/empty.xml                   |   21 -
 src/main/assemblies/src-package.xml             |   49 -
 src/main/assemblies/standalone-package.xml      |  169 -
 src/main/examples/app/hive/wordcount.hql        |   19 -
 src/main/examples/app/oozie-mr/workflow.xml     |   63 -
 src/main/examples/app/pig/hcat-wordcount.pig    |   23 -
 src/main/examples/app/pig/wordcount.pig         |   23 -
 src/main/examples/data/generate.sh              |   53 -
 src/main/examples/data/hcat-generate.sh         |   45 -
 .../entity/filesystem/embedded-cluster.xml      |   51 -
 src/main/examples/entity/filesystem/in-feed.xml |   39 -
 .../entity/filesystem/oozie-mr-process.xml      |   50 -
 .../examples/entity/filesystem/out-feed.xml     |   39 -
 .../examples/entity/filesystem/pig-process.xml  |   43 -
 .../entity/filesystem/replication-feed.xml      |   46 -
 .../entity/filesystem/standalone-cluster.xml    |   43 -
 .../filesystem/standalone-target-cluster.xml    |   43 -
 src/main/examples/entity/hcat/hcat-in-feed.xml  |   37 -
 src/main/examples/entity/hcat/hcat-out-feed.xml |   35 -
 .../examples/entity/hcat/hcat-pig-process.xml   |   43 -
 .../entity/hcat/hcat-replication-feed.xml       |   42 -
 .../entity/hcat/hcat-standalone-cluster.xml     |   45 -
 .../hcat/hcat-standalone-target-cluster.xml     |   45 -
 src/main/examples/entity/hcat/hive-process.xml  |   43 -
 test-tools/hadoop-webapp/pom.xml                |  353 --
 .../org/apache/falcon/JobTrackerService.java    |   28 -
 .../falcon/listener/HadoopStartupListener.java  |  144 -
 .../src/main/resources/core-site.xml            |   55 -
 .../src/main/resources/hdfs-site.xml            |   64 -
 .../src/main/resources/hive-site.xml            |   60 -
 .../hadoop-webapp/src/main/resources/log4j.xml  |   38 -
 .../src/main/resources/mapred-site.xml          |   72 -
 .../src/main/resources/yarn-site.xml            |   59 -
 .../src/main/webapp/WEB-INF/web.xml             |   32 -
 .../apache/hadoop/mapred/LocalRunnerTest.java   |   37 -
 .../org/apache/hadoop/mapred/LocalRunnerV2.java |  242 --
 test-tools/hcatalog-sharelib/pom.xml            |   64 -
 test-tools/hive-sharelib/pom.xml                |   64 -
 test-tools/oozie-sharelib/pom.xml               |   63 -
 test-tools/pig-sharelib/pom.xml                 |   63 -
 test-tools/pom.xml                              |   42 -
 test-util/pom.xml                               |   88 -
 .../falcon/cluster/util/EmbeddedCluster.java    |  151 -
 .../cluster/util/EntityBuilderTestUtil.java     |  184 -
 .../falcon/cluster/util/StandAloneCluster.java  |   58 -
 test-util/src/main/resources/core-site.xml      |   31 -
 trunk/general/pom.xml                           |    4 -
 unit/pom.xml                                    |  119 -
 .../java/org/apache/falcon/unit/FalconUnit.java |  218 --
 .../apache/falcon/unit/FalconUnitClient.java    |  455 ---
 .../apache/falcon/unit/FalconUnitHelper.java    |  100 -
 .../unit/LocalFalconClientProtocolProvider.java |   62 -
 .../unit/LocalFalconRPCClientFactory.java       |  241 --
 .../falcon/unit/LocalInstanceManager.java       |   94 -
 .../unit/LocalSchedulableEntityManager.java     |  114 -
 ...op.mapreduce.protocol.ClientProtocolProvider |   18 -
 unit/src/main/resources/core-site.xml           |   38 -
 unit/src/main/resources/deploy.properties       |   21 -
 .../main/resources/localoozie-log4j.properties  |   34 -
 unit/src/main/resources/log4j.xml               |   91 -
 unit/src/main/resources/mapred-site.xml         |   35 -
 unit/src/main/resources/oozie-site.xml          |  217 --
 unit/src/main/resources/startup.properties      |  147 -
 unit/src/main/resources/yarn-site.xml           |   30 -
 .../apache/falcon/unit/FalconUnitTestBase.java  |  396 ---
 .../org/apache/falcon/unit/TestFalconUnit.java  |  385 ---
 .../falcon/unit/examples/JavaExample.java       |   65 -
 .../unit/examples/JavaHelloWorldExample.java    |   33 -
 .../falcon/unit/examples/JavaSleepExample.java  |   33 -
 unit/src/test/resources/infeed.xml              |   39 -
 unit/src/test/resources/input.txt               |   18 -
 .../test/resources/local-cluster-template.xml   |   36 -
 unit/src/test/resources/outfeed.xml             |   39 -
 unit/src/test/resources/process.xml             |   50 -
 unit/src/test/resources/sleepWorkflow.xml       |   41 -
 unit/src/test/resources/workflow.xml            |   43 -
 webapp/pom.xml                                  |  631 ----
 .../conf/oozie/conf/hadoop-conf/core-site.xml   |   47 -
 .../conf/oozie/conf/hadoop-conf/mapred-site.xml |   28 -
 .../src/conf/oozie/conf/oozie-log4j.properties  |   43 -
 webapp/src/conf/oozie/conf/oozie-site.xml       |  583 ----
 .../falcon/resource/ConfigSyncService.java      |   90 -
 .../apache/falcon/resource/InstanceManager.java |  327 --
 .../resource/SchedulableEntityManager.java      |  344 --
 webapp/src/main/resources/deploy.properties     |   21 -
 .../main/resources/falcon-buildinfo.properties  |   28 -
 webapp/src/main/resources/log4j.xml             |  115 -
 .../src/main/webapp/WEB-INF/distributed/web.xml |  134 -
 webapp/src/main/webapp/WEB-INF/embedded/web.xml |   93 -
 webapp/src/main/webapp/WEB-INF/web.xml          |   93 -
 webapp/src/main/webapp/index.html               |   44 -
 .../catalog/CatalogPartitionHandlerIT.java      |   85 -
 .../apache/falcon/catalog/CatalogStorageIT.java |  170 -
 .../falcon/catalog/HiveCatalogServiceIT.java    |  390 ---
 .../java/org/apache/falcon/cli/FalconCLIIT.java | 1067 ------
 .../org/apache/falcon/cli/FalconCLISmokeIT.java |  109 -
 .../apache/falcon/late/LateDataHandlerIT.java   |  220 --
 .../apache/falcon/lifecycle/FeedImportIT.java   |  229 --
 .../lifecycle/FileSystemFeedReplicationIT.java  |  268 --
 .../lifecycle/TableStorageFeedEvictorIT.java    |  591 ----
 .../TableStorageFeedReplicationIT.java          |  239 --
 .../org/apache/falcon/process/PigProcessIT.java |  130 -
 .../falcon/process/TableStorageProcessIT.java   |  206 --
 .../AbstractSchedulerManagerJerseyIT.java       |  244 --
 .../falcon/resource/AbstractTestContext.java    |   69 -
 .../falcon/resource/EntityManagerJerseyIT.java  |  890 -----
 .../resource/EntityManagerJerseySmokeIT.java    |  124 -
 .../EntityManagerPaginationJerseyIT.java        |  166 -
 .../EntitySchedulerManagerJerseyIT.java         |  116 -
 .../InstanceSchedulerManagerJerseyIT.java       |  171 -
 .../resource/MetadataResourceJerseyIT.java      |  136 -
 .../resource/ProcessInstanceManagerIT.java      |  252 --
 .../org/apache/falcon/resource/TestContext.java |  580 ----
 .../apache/falcon/resource/UnitTestContext.java |  124 -
 .../java/org/apache/falcon/util/FSUtils.java    |  101 -
 .../org/apache/falcon/util/HiveTestUtils.java   |  196 --
 .../org/apache/falcon/util/HsqldbTestUtils.java |  310 --
 .../org/apache/falcon/util/OozieTestUtils.java  |  268 --
 .../util/ResourcesReflectionUtilTest.java       |   52 -
 .../validation/ClusterEntityValidationIT.java   |  213 --
 .../validation/FeedEntityValidationIT.java      |  146 -
 webapp/src/test/resources/apps/data/data.txt    | 1000 ------
 webapp/src/test/resources/apps/hive/script.hql  |   19 -
 webapp/src/test/resources/apps/pig/id.pig       |   20 -
 webapp/src/test/resources/apps/pig/table-id.pig |   21 -
 webapp/src/test/resources/client.properties     |   21 -
 webapp/src/test/resources/cluster-template.xml  |   41 -
 .../test/resources/credential_provider.jceks    |  Bin 504 -> 0 bytes
 .../src/test/resources/datasource-template1.xml |   46 -
 .../src/test/resources/datasource-template2.xml |   46 -
 .../src/test/resources/datasource-template3.xml |   49 -
 .../src/test/resources/datasource-template4.xml |   36 -
 webapp/src/test/resources/feed-template1.xml    |   45 -
 webapp/src/test/resources/feed-template2.xml    |   46 -
 webapp/src/test/resources/feed-template3.xml    |   59 -
 webapp/src/test/resources/feed-template4.xml    |   59 -
 webapp/src/test/resources/fs-workflow.xml       |   32 -
 .../resources/hdfs-replication-template.xml     |   44 -
 .../test/resources/hdfs-replication.properties  |   47 -
 .../src/test/resources/helloworldworkflow.xml   |   39 -
 webapp/src/test/resources/hive-table-feed.xml   |   37 -
 .../local-process-noinputs-template.xml         |   42 -
 .../src/test/resources/pig-process-template.xml |   49 -
 webapp/src/test/resources/process-template.xml  |   52 -
 webapp/src/test/resources/process-version-0.xml |   60 -
 webapp/src/test/resources/runtime.properties    |   56 -
 webapp/src/test/resources/sleepWorkflow.xml     |   41 -
 webapp/src/test/resources/startup.properties    |  168 -
 .../src/test/resources/statestore.credentials   |   20 -
 webapp/src/test/resources/statestore.properties |   35 -
 webapp/src/test/resources/table/bcp-cluster.xml |   52 -
 .../table/complex-replicating-feed.xml          |   71 -
 .../table/customer-fs-replicating-feed.xml      |   56 -
 .../table/customer-table-replicating-feed.xml   |   46 -
 .../resources/table/hive-process-template.xml   |   51 -
 .../table/multiple-targets-replicating-feed.xml |   74 -
 .../test/resources/table/pig-process-tables.xml |   51 -
 .../test/resources/table/primary-cluster.xml    |   52 -
 .../test/resources/table/table-feed-input.xml   |   37 -
 .../test/resources/table/table-feed-output.xml  |   37 -
 .../resources/table/target-cluster-alpha.xml    |   52 -
 .../resources/table/target-cluster-beta.xml     |   52 -
 .../resources/table/target-cluster-gamma.xml    |   52 -
 1639 files changed, 374 insertions(+), 249192 deletions(-)
----------------------------------------------------------------------



[31/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/ProcessHelperTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/ProcessHelperTest.java b/common/src/test/java/org/apache/falcon/entity/ProcessHelperTest.java
deleted file mode 100644
index 0729f15..0000000
--- a/common/src/test/java/org/apache/falcon/entity/ProcessHelperTest.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Clusters;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.resource.SchedulableEntityInstance;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.TimeZone;
-
-
-/**
- * Tests for ProcessHelper methods.
- */
-public class ProcessHelperTest extends AbstractTestBase {
-    private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
-    private ConfigurationStore store;
-
-    @BeforeClass
-    public void init() throws Exception {
-        initConfigStore();
-    }
-
-    @BeforeMethod
-    public void setUp() throws Exception {
-        cleanupStore();
-        store = ConfigurationStore.get();
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testBeforeStartInstance() throws FalconException, ParseException {
-        // create a process with input feeds
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-
-        // find the input Feed instances time
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Inputs inputs = new Inputs();
-        Input input = getInput("inputFeed", feed.getName(), "now(0,-20)", "now(0,0)", false);
-        inputs.getInputs().add(input);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Date processInstanceDate = getDate("2012-02-28 10:27 UTC");
-        ProcessHelper.getInputFeedInstances(process, processInstanceDate, cluster, false);
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testEqualsToEndInstance() throws FalconException, ParseException {
-        // create a process with input feeds
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-
-        // find the input Feed instances time
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Inputs inputs = new Inputs();
-        Input input = getInput("inputFeed", feed.getName(), "now(0,-20)", "now(0,0)", false);
-        inputs.getInputs().add(input);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Date processInstanceDate = getDate("2012-02-28 10:47 UTC");
-        ProcessHelper.getInputFeedInstances(process, processInstanceDate, cluster, false);
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testOutOfSyncInstance() throws FalconException, ParseException {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Inputs inputs = new Inputs();
-        Input input = getInput("inputFeed", feed.getName(), "now(0,-20)", "now(0,0)", false);
-        inputs.getInputs().add(input);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-        Date processInstanceDate = getDate("2012-02-28 10:40 UTC");
-        ProcessHelper.getInputFeedInstances(process, processInstanceDate, cluster, false);
-    }
-
-    @Test
-    public void testProcessWithNoDependencies() throws Exception {
-        Cluster cluster = publishCluster();
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        store.publish(EntityType.PROCESS, process);
-        Date processInstanceDate = getDate("2012-02-28 10:37 UTC");
-        Set<SchedulableEntityInstance> inputFeedInstances = ProcessHelper.getInputFeedInstances(process,
-            processInstanceDate, cluster, false);
-        Assert.assertTrue(inputFeedInstances.isEmpty());
-        Set<SchedulableEntityInstance> res = ProcessHelper.getOutputFeedInstances(process, processInstanceDate,
-            cluster);
-        Assert.assertTrue(res.isEmpty());
-    }
-
-    @Test
-    public void testGetInputFeedInstances() throws FalconException, ParseException {
-        // create a process with input feeds
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-
-        // find the input Feed instances time
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2016-02-28 10:37 UTC");
-        Inputs inputs = new Inputs();
-        Input input = getInput("inputFeed", feed.getName(), "now(0,-20)", "now(0,0)", false);
-        inputs.getInputs().add(input);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Date processInstanceDate = getDate("2012-02-28 10:37 UTC");
-        Set<SchedulableEntityInstance> inputFeedInstances = ProcessHelper.getInputFeedInstances(process,
-                processInstanceDate, cluster, false);
-        Assert.assertEquals(inputFeedInstances.size(), 5);
-
-        Set<SchedulableEntityInstance> expectedInputFeedInstances = new HashSet<>();
-        String[] inputInstances = { "2012-02-28 10:15 UTC", "2012-02-28 10:20 UTC", "2012-02-28 10:25 UTC",
-            "2012-02-28 10:30 UTC", "2012-02-28 10:35 UTC", };
-        for (String d : inputInstances) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(feed.getName(), cluster.getName(),
-                    getDate(d), EntityType.FEED);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expectedInputFeedInstances.add(i);
-        }
-        Assert.assertTrue(inputFeedInstances.equals(expectedInputFeedInstances));
-    }
-
-    @Test
-    public void testGetOutputFeedInstances() throws FalconException, ParseException {
-        // create a process with input feeds
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "days(1)", "2012-02-27 11:00 UTC", "2016-02-28 11:00 UTC");
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Outputs outputs = new Outputs();
-        outputs.getOutputs().add(getOutput("outputFeed", feed.getName(), "now(0,0)"));
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = ProcessHelper.getOutputFeedInstances(process,
-                getDate("2012-02-28 10:00 UTC"), cluster);
-
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        SchedulableEntityInstance ins = new SchedulableEntityInstance(feed.getName(), cluster.getName(),
-                getDate("2012-02-27 11:00 UTC"), EntityType.FEED);
-        ins.setTags(SchedulableEntityInstance.OUTPUT);
-        expected.add(ins);
-
-        Assert.assertEquals(result, expected);
-
-    }
-
-    private org.apache.falcon.entity.v0.process.Validity getProcessValidity(String start, String end) throws
-            ParseException {
-
-        org.apache.falcon.entity.v0.process.Validity validity = new org.apache.falcon.entity.v0.process.Validity();
-        validity.setStart(getDate(start));
-        validity.setEnd(getDate(end));
-        return validity;
-    }
-
-    private Date getDate(String dateString) throws ParseException {
-        return new SimpleDateFormat("yyyy-MM-dd HH:mm Z").parse(dateString);
-    }
-
-    private org.apache.falcon.entity.v0.feed.Validity getFeedValidity(String start, String end) throws ParseException {
-        org.apache.falcon.entity.v0.feed.Validity validity = new org.apache.falcon.entity.v0.feed.Validity();
-        validity.setStart(getDate(start));
-        validity.setEnd(getDate(end));
-        return validity;
-    }
-
-    private Input getInput(String name, String feedName, String start, String end, boolean isOptional) {
-        Input inFeed = new Input();
-        inFeed.setName(name);
-        inFeed.setFeed(feedName);
-        inFeed.setStart(start);
-        inFeed.setEnd(end);
-        inFeed.setOptional(isOptional);
-        return inFeed;
-    }
-
-    private Output getOutput(String name, String feedName, String instance) {
-        Output output = new Output();
-        output.setInstance(instance);
-        output.setFeed(feedName);
-        output.setName(name);
-        return output;
-    }
-
-    private Cluster publishCluster() throws FalconException {
-        Cluster cluster = new Cluster();
-        cluster.setName("feedCluster");
-        cluster.setColo("colo");
-        store.publish(EntityType.CLUSTER, cluster);
-        return cluster;
-
-    }
-
-    private Feed publishFeed(Cluster cluster, String frequency, String start, String end)
-        throws FalconException, ParseException {
-
-        Feed feed = new Feed();
-        feed.setName("feed");
-        Frequency f = new Frequency(frequency);
-        feed.setFrequency(f);
-        feed.setTimezone(UTC);
-        Clusters fClusters = new Clusters();
-        org.apache.falcon.entity.v0.feed.Cluster fCluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        fCluster.setName(cluster.getName());
-        fCluster.setValidity(getFeedValidity(start, end));
-        fClusters.getClusters().add(fCluster);
-        feed.setClusters(fClusters);
-        store.publish(EntityType.FEED, feed);
-
-        return feed;
-    }
-
-    private Process prepareProcess(Cluster cluster, String frequency, String start, String end) throws ParseException {
-        Process process = new Process();
-        process.setName("process");
-        process.setTimezone(UTC);
-        org.apache.falcon.entity.v0.process.Clusters pClusters = new org.apache.falcon.entity.v0.process.Clusters();
-        org.apache.falcon.entity.v0.process.Cluster pCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        pCluster.setName(cluster.getName());
-        org.apache.falcon.entity.v0.process.Validity validity = getProcessValidity(start, end);
-        pCluster.setValidity(validity);
-        pClusters.getClusters().add(pCluster);
-        process.setClusters(pClusters);
-        Frequency f = new Frequency(frequency);
-        process.setFrequency(f);
-        return process;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/StorageFactoryTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/StorageFactoryTest.java b/common/src/test/java/org/apache/falcon/entity/StorageFactoryTest.java
deleted file mode 100644
index eb0127d..0000000
--- a/common/src/test/java/org/apache/falcon/entity/StorageFactoryTest.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.entity.parser.ClusterEntityParser;
-import org.apache.falcon.entity.parser.EntityParserFactory;
-import org.apache.falcon.entity.parser.FeedEntityParser;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.io.InputStream;
-import java.util.List;
-
-/**
- * Test for storage factory methods in feed helper.
- */
-public class StorageFactoryTest {
-
-    private static final String CLUSTER_XML = "/config/cluster/cluster-0.1.xml";
-
-    private static final String FS_FEED_UNIFORM = "/config/feed/feed-0.1.xml";
-    private static final String FS_FEED_OVERRIDE = "/config/feed/feed-0.2.xml";
-
-    private static final String TABLE_FEED_UNIFORM = "/config/feed/hive-table-feed.xml";
-    private static final String TABLE_FEED_OVERRIDE = "/config/feed/hive-table-feed-out.xml";
-
-    private static final String OVERRIDE_TBL_LOC = "/testCluster/clicks-summary/ds=${YEAR}-${MONTH}-${DAY}-${HOUR}";
-
-    private final ClusterEntityParser clusterParser =
-            (ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER);
-    private final FeedEntityParser feedParser =
-            (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-
-    private Cluster clusterEntity;
-    private Feed fsFeedWithUniformStorage;
-    private Feed fsFeedWithOverriddenStorage;
-    private Feed tableFeedWithUniformStorage;
-    private Feed tableFeedWithOverriddenStorage;
-
-    @BeforeClass
-    public void setup() throws Exception {
-        InputStream stream = this.getClass().getResourceAsStream(CLUSTER_XML);
-        clusterEntity = clusterParser.parse(stream);
-        stream.close();
-        Interface registry = ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY);
-        registry.setEndpoint("thrift://localhost:9083");
-        ConfigurationStore.get().publish(EntityType.CLUSTER, clusterEntity);
-
-        stream = this.getClass().getResourceAsStream(FS_FEED_UNIFORM);
-        fsFeedWithUniformStorage = feedParser.parse(stream);
-        stream.close();
-
-        stream = this.getClass().getResourceAsStream(FS_FEED_OVERRIDE);
-        fsFeedWithOverriddenStorage = feedParser.parse(stream);
-        stream.close();
-
-        stream = this.getClass().getResourceAsStream(TABLE_FEED_UNIFORM);
-        tableFeedWithUniformStorage = feedParser.parse(stream);
-        stream.close();
-
-        stream = this.getClass().getResourceAsStream(TABLE_FEED_OVERRIDE);
-        tableFeedWithOverriddenStorage = feedParser.parse(stream);
-        stream.close();
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        ConfigurationStore.get().remove(EntityType.CLUSTER, clusterEntity.getName());
-    }
-
-    @DataProvider (name = "locationsDataProvider")
-    private Object[][] createLocationsDataProvider() {
-        return new Object[][] {
-            {fsFeedWithUniformStorage, "/projects/falcon/clicks"},
-            {fsFeedWithOverriddenStorage, "/testCluster/projects/falcon/clicks"},
-        };
-    }
-
-    @Test (dataProvider = "locationsDataProvider")
-    public void testGetLocations(Feed feed, String dataPath) {
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                FeedHelper.getCluster(feed, clusterEntity.getName());
-        List<Location> locations = FeedHelper.getLocations(feedCluster, feed);
-        for (Location location : locations) {
-            if (location.getType() == LocationType.DATA) {
-                Assert.assertEquals(location.getPath(), dataPath);
-            }
-        }
-    }
-
-    @DataProvider (name = "tableDataProvider")
-    private Object[][] createTableDataProvider() {
-        return new Object[][] {
-            {tableFeedWithUniformStorage, "catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}"},
-            {tableFeedWithOverriddenStorage, "catalog:testCluster:clicks-summary#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}"},
-        };
-    }
-
-    @Test (dataProvider = "tableDataProvider")
-    public void testGetTable(Feed feed, String dataPath) {
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                FeedHelper.getCluster(feed, clusterEntity.getName());
-        CatalogTable table = FeedHelper.getTable(feedCluster, feed);
-        Assert.assertEquals(table.getUri(), dataPath);
-    }
-
-    private static final String UNIFORM_TABLE = "${hcatNode}/default/clicks/ds=${YEAR}-${MONTH}-${DAY}-${HOUR}";
-    private static final String OVERRIDETBL = "${hcatNode}/default/clicks-summary/ds=${YEAR}-${MONTH}-${DAY}-${HOUR}";
-
-
-    @DataProvider (name = "uniformFeedStorageDataProvider")
-    private Object[][] createUniformFeedStorageDataProvider() {
-        return new Object[][] {
-            {fsFeedWithUniformStorage, Storage.TYPE.FILESYSTEM, "${nameNode}/projects/falcon/clicks"},
-            {fsFeedWithOverriddenStorage, Storage.TYPE.FILESYSTEM, "${nameNode}/projects/falcon/clicks"},
-            {tableFeedWithUniformStorage, Storage.TYPE.TABLE, UNIFORM_TABLE},
-            {tableFeedWithOverriddenStorage, Storage.TYPE.TABLE, OVERRIDETBL},
-        };
-    }
-
-    @Test (dataProvider = "uniformFeedStorageDataProvider")
-    public void testCreateStorageWithFeed(Feed feed, Storage.TYPE storageType,
-                                            String dataLocation) throws Exception {
-        Storage storage = FeedHelper.createStorage(feed);
-        Assert.assertEquals(storage.getType(), storageType);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), dataLocation);
-
-        if (storageType == Storage.TYPE.TABLE) {
-            Assert.assertEquals(((CatalogStorage) storage).getDatabase(), "default");
-        }
-    }
-
-    @DataProvider (name = "overriddenFeedStorageDataProvider")
-    private Object[][] createFeedStorageDataProvider() {
-        return new Object[][] {
-            {fsFeedWithUniformStorage, Storage.TYPE.FILESYSTEM, "/projects/falcon/clicks"},
-            {fsFeedWithOverriddenStorage, Storage.TYPE.FILESYSTEM, "/testCluster/projects/falcon/clicks"},
-            {tableFeedWithUniformStorage, Storage.TYPE.TABLE, "/default/clicks/ds=${YEAR}-${MONTH}-${DAY}-${HOUR}"},
-            {tableFeedWithOverriddenStorage, Storage.TYPE.TABLE, OVERRIDE_TBL_LOC},
-        };
-    }
-
-    @Test (dataProvider = "overriddenFeedStorageDataProvider")
-    public void testCreateStorageWithFeedAndClusterEntity(Feed feed, Storage.TYPE storageType,
-                                                          String dataLocation) throws Exception {
-        Storage storage = FeedHelper.createStorage(clusterEntity, feed);
-        Assert.assertEquals(storage.getType(), storageType);
-
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            dataLocation = ClusterHelper.getStorageUrl(clusterEntity) + dataLocation;
-        } else if (storageType == Storage.TYPE.TABLE) {
-            dataLocation =
-                    ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).getEndpoint() + dataLocation;
-        }
-
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), dataLocation);
-    }
-
-    @Test (dataProvider = "overriddenFeedStorageDataProvider")
-    public void testCreateStorageWithFeedAndClusterName(Feed feed, Storage.TYPE storageType,
-                                                        String dataLocation) throws Exception {
-        Storage storage = FeedHelper.createStorage(clusterEntity.getName(), feed);
-        Assert.assertEquals(storage.getType(), storageType);
-
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            dataLocation = ClusterHelper.getStorageUrl(clusterEntity) + dataLocation;
-        } else if (storageType == Storage.TYPE.TABLE) {
-            dataLocation =
-                    ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).getEndpoint() + dataLocation;
-        }
-
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), dataLocation);
-    }
-
-    @Test (dataProvider = "overriddenFeedStorageDataProvider")
-    public void testCreateStorageWithFeedAndFeedCluster(Feed feed, Storage.TYPE storageType,
-                                                        String dataLocation) throws Exception {
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                FeedHelper.getCluster(feed, clusterEntity.getName());
-        Storage storage = FeedHelper.createStorage(feedCluster, feed);
-        Assert.assertEquals(storage.getType(), storageType);
-
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            dataLocation = ClusterHelper.getStorageUrl(clusterEntity) + dataLocation;
-        } else if (storageType == Storage.TYPE.TABLE) {
-            dataLocation =
-                    ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).getEndpoint() + dataLocation;
-        }
-
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), dataLocation);
-    }
-
-    @Test (dataProvider = "overriddenFeedStorageDataProvider")
-    public void testCreateStorageWithAll(Feed feed, Storage.TYPE storageType,
-                                         String dataLocation) throws Exception {
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                FeedHelper.getCluster(feed, clusterEntity.getName());
-        Storage storage = FeedHelper.createStorage(feedCluster, feed, clusterEntity);
-        Assert.assertEquals(storage.getType(), storageType);
-
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            dataLocation = ClusterHelper.getStorageUrl(clusterEntity) + dataLocation;
-        } else if (storageType == Storage.TYPE.TABLE) {
-            dataLocation =
-                    ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).getEndpoint() + dataLocation;
-        }
-
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), dataLocation);
-    }
-
-    @Test (dataProvider = "overriddenFeedStorageDataProvider")
-    public void testCreateReadOnlyStorage(Feed feed, Storage.TYPE storageType,
-                                          String dataLocation) throws Exception {
-        Storage readOnlyStorage = FeedHelper.createReadOnlyStorage(clusterEntity, feed);
-        Assert.assertEquals(readOnlyStorage.getType(), storageType);
-
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            dataLocation = ClusterHelper.getReadOnlyStorageUrl(clusterEntity) + dataLocation;
-        } else if (storageType == Storage.TYPE.TABLE) {
-            dataLocation =
-                    ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).getEndpoint() + dataLocation;
-        }
-
-        Assert.assertEquals(readOnlyStorage.getUriTemplate(LocationType.DATA), dataLocation);
-    }
-
-    @DataProvider (name = "uriTemplateDataProvider")
-    private Object[][] createUriTemplateDataProvider() {
-        return new Object[][] {
-            {Storage.TYPE.FILESYSTEM, "/projects/falcon/clicks"},
-            {Storage.TYPE.FILESYSTEM, "/testCluster/projects/falcon/clicks"},
-            {Storage.TYPE.TABLE, "/default/clicks/ds=${YEAR}-${MONTH}-${DAY}-${HOUR}"},
-            {Storage.TYPE.TABLE, OVERRIDE_TBL_LOC},
-        };
-    }
-
-    @Test (dataProvider = "uriTemplateDataProvider")
-    public void testCreateStorageWithUriTemplate(Storage.TYPE storageType,
-                                                 String dataLocation) throws Exception {
-        String uriTemplate = null;
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            uriTemplate = "DATA=" + ClusterHelper.getStorageUrl(clusterEntity) + dataLocation + "#";
-            dataLocation = ClusterHelper.getStorageUrl(clusterEntity) + dataLocation;
-        } else if (storageType == Storage.TYPE.TABLE) {
-            uriTemplate =
-                    ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).getEndpoint() + dataLocation;
-            dataLocation = uriTemplate;
-        }
-
-        Storage storage = FeedHelper.createStorage(storageType.name(), uriTemplate);
-        Assert.assertEquals(storage.getType(), storageType);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), dataLocation);
-    }
-
-    @DataProvider (name = "storageTypeDataProvider")
-    private Object[][] createStorageTypeDataProvider() {
-        return new Object[][] {
-            {fsFeedWithUniformStorage, Storage.TYPE.FILESYSTEM},
-            {fsFeedWithOverriddenStorage, Storage.TYPE.FILESYSTEM},
-            {tableFeedWithUniformStorage, Storage.TYPE.TABLE},
-            {tableFeedWithOverriddenStorage, Storage.TYPE.TABLE},
-        };
-    }
-
-    @Test (dataProvider = "storageTypeDataProvider")
-    public void testGetStorageTypeWithFeed(Feed feed, Storage.TYPE expectedStorageType) throws Exception {
-        Storage.TYPE actualStorageType = FeedHelper.getStorageType(feed);
-        Assert.assertEquals(actualStorageType, expectedStorageType);
-
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                FeedHelper.getCluster(feed, clusterEntity.getName());
-        actualStorageType = FeedHelper.getStorageType(feed, feedCluster);
-        Assert.assertEquals(actualStorageType, expectedStorageType);
-
-        actualStorageType = FeedHelper.getStorageType(feed, clusterEntity);
-        Assert.assertEquals(actualStorageType, expectedStorageType);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/TestWorkflowNameBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/TestWorkflowNameBuilder.java b/common/src/test/java/org/apache/falcon/entity/TestWorkflowNameBuilder.java
deleted file mode 100644
index 5b1af78..0000000
--- a/common/src/test/java/org/apache/falcon/entity/TestWorkflowNameBuilder.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.entity;
-
-import org.apache.falcon.Pair;
-import org.apache.falcon.Tag;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Process;
-import org.testng.Assert;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.Arrays;
-
-/**
- * Test for workflow name builder.
- */
-public class TestWorkflowNameBuilder {
-
-    @Test
-    public void getTagTest() {
-        Feed feed = new Feed();
-        feed.setName("raw-logs");
-
-        WorkflowNameBuilder<Feed> builder = new WorkflowNameBuilder<Feed>(feed);
-        Tag tag = builder.getWorkflowTag("FALCON_FEED_RETENTION_raw-logs");
-        Assert.assertEquals(tag, Tag.RETENTION);
-
-        tag = builder.getWorkflowTag("FALCON_FEED_raw-logs");
-        Assert.assertNull(tag);
-
-        tag = builder.getWorkflowTag("FALCON_FEED_REPLICATION_raw-logs_corp1");
-        Assert.assertEquals(tag, Tag.REPLICATION);
-
-    }
-
-    @Test
-    public void getSuffixesTest() {
-        Feed feed = new Feed();
-        feed.setName("raw-logs");
-        WorkflowNameBuilder<Feed> builder = new WorkflowNameBuilder<Feed>(feed);
-
-        String suffixes = builder
-                .getWorkflowSuffixes("FALCON_FEED_REPLICATION_raw-logs_corp-1");
-        Assert.assertEquals(suffixes, "_corp-1");
-
-        suffixes = builder
-                .getWorkflowSuffixes("FALCON_FEED_REPLICATION_raw-logs");
-        Assert.assertEquals(suffixes, "");
-    }
-
-    @Test
-    public void workflowNameTest() {
-        Feed feed = new Feed();
-        feed.setName("raw-logs");
-
-        WorkflowNameBuilder<Feed> builder = new WorkflowNameBuilder<Feed>(feed);
-        Assert.assertEquals(builder.getWorkflowName().toString(),
-                "FALCON_FEED_raw-logs");
-
-        builder.setTag(Tag.REPLICATION);
-        Assert.assertEquals(builder.getWorkflowName().toString(),
-                "FALCON_FEED_REPLICATION_raw-logs");
-
-        builder.setSuffixes(Arrays.asList("cluster1"));
-        Assert.assertEquals(builder.getWorkflowName().toString(),
-                "FALCON_FEED_REPLICATION_raw-logs_cluster1");
-
-        Process process = new Process();
-        process.setName("agg-logs");
-        WorkflowNameBuilder<Process> processBuilder = new WorkflowNameBuilder<Process>(
-                process);
-        processBuilder.setTag(Tag.DEFAULT);
-        Assert.assertEquals(processBuilder.getWorkflowName().toString(),
-                "FALCON_PROCESS_DEFAULT_agg-logs");
-
-    }
-
-    @Test(dataProvider = "workflowNames")
-    public void workflowNameTypeTest(String wfName, Pair<String, EntityType> nameType) {
-        Assert.assertEquals(WorkflowNameBuilder.WorkflowName.getEntityNameAndType(wfName), nameType);
-    }
-
-    @DataProvider(name = "workflowNames")
-    public Object[][] getWorkflowNames() {
-        return new Object[][] {
-            {"FALCON_PROCESS_DEFAULT_agg-logs", new Pair<>("agg-logs", EntityType.PROCESS)},
-            {"FALCON_FEED_REPLICATION_raw-logs", new Pair<>("raw-logs", EntityType.FEED)},
-            {"FALCON_FEED_RETENTION_logs2", new Pair<>("logs2", EntityType.FEED)},
-            {"FALCON_FEED_REPLICATION_logs_colo1", new Pair<>("logs", EntityType.FEED)},
-        };
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/lock/MemoryLocksTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/lock/MemoryLocksTest.java b/common/src/test/java/org/apache/falcon/entity/lock/MemoryLocksTest.java
deleted file mode 100644
index d4cf82c..0000000
--- a/common/src/test/java/org/apache/falcon/entity/lock/MemoryLocksTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.lock;
-
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.process.Process;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-/**
- * Test for Memory Locking mechanism used for schedule/update of entities.
- */
-
-public class MemoryLocksTest {
-    private static final String FEED_XML = "/config/feed/feed-0.1.xml";
-    private static final String PROCESS_XML = "/config/process/process-0.1.xml";
-
-    @Test
-    public void testSuccessfulMemoryLockAcquisition() throws Exception {
-        MemoryLocks memoryLocks = MemoryLocks.getInstance();
-        Entity feed = (Entity) EntityType.FEED.getUnmarshaller().unmarshal(this.getClass().getResource(FEED_XML));
-        Assert.assertEquals(memoryLocks.acquireLock(feed, "test"), true);
-        memoryLocks.releaseLock(feed);
-    }
-
-    @Test
-    public void testUnsuccessfulMemoryLockAcquisition() throws Exception {
-        MemoryLocks memoryLocks = MemoryLocks.getInstance();
-        Entity feed = (Entity) EntityType.FEED.getUnmarshaller().unmarshal(this.getClass().getResource(FEED_XML));
-        Assert.assertEquals(memoryLocks.acquireLock(feed, "test"), true);
-        Assert.assertEquals(memoryLocks.acquireLock(feed, "test"), false);
-        memoryLocks.releaseLock(feed);
-    }
-
-    @Test
-    public void testDuplicateEntityNameLockAcquisition() throws Exception {
-        MemoryLocks memoryLocks = MemoryLocks.getInstance();
-        //In case both feed & process have identical names, they shouldn't clash during updates
-        Entity feed = (Entity) EntityType.FEED.getUnmarshaller().unmarshal(this.getClass().getResource(FEED_XML));
-        org.apache.falcon.entity.v0.process.Process process = (Process) EntityType.PROCESS.getUnmarshaller().
-                unmarshal(this.getClass().getResource(PROCESS_XML));
-        process.setName(feed.getName());
-        Assert.assertEquals(memoryLocks.acquireLock(feed, "test"), true);
-        Assert.assertEquals(memoryLocks.acquireLock(process, "test"), true);
-        memoryLocks.releaseLock(feed);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/parser/ClusterEntityParserTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/parser/ClusterEntityParserTest.java b/common/src/test/java/org/apache/falcon/entity/parser/ClusterEntityParserTest.java
deleted file mode 100644
index f98b6e4..0000000
--- a/common/src/test/java/org/apache/falcon/entity/parser/ClusterEntityParserTest.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.catalog.CatalogServiceFactory;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Location;
-import org.apache.falcon.entity.v0.cluster.Locations;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.mockito.Mockito;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringWriter;
-
-
-/**
- * Test for validating cluster entity parsing.
- */
-public class ClusterEntityParserTest extends AbstractTestBase {
-
-    private final ClusterEntityParser parser = (ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER);
-    private static final String CLUSTER_LOCATIONS_BASE_DIR = "/projects/falcon/ClusterEntityParserTestLocations/";
-
-    @Test
-    public void testParse() throws IOException, FalconException, JAXBException {
-
-        InputStream stream = this.getClass().getResourceAsStream(CLUSTER_XML);
-
-        Cluster cluster = parser.parse(stream);
-        ClusterHelper.getInterface(cluster, Interfacetype.WRITE)
-                .setEndpoint(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
-        Locations locations = getClusterLocations("staging0", "working0");
-        cluster.setLocations(locations);
-
-        Assert.assertNotNull(cluster);
-        Assert.assertEquals(cluster.getName(), "testCluster");
-
-        Interface execute = ClusterHelper.getInterface(cluster, Interfacetype.EXECUTE);
-
-        Assert.assertEquals(execute.getEndpoint(), "localhost:8021");
-        Assert.assertEquals(execute.getVersion(), "0.20.2");
-
-        Interface readonly = ClusterHelper.getInterface(cluster, Interfacetype.READONLY);
-        Assert.assertEquals(readonly.getEndpoint(), "hftp://localhost:50010");
-        Assert.assertEquals(readonly.getVersion(), "0.20.2");
-
-        Interface write = ClusterHelper.getInterface(cluster, Interfacetype.WRITE);
-        //assertEquals(write.getEndpoint(), conf.get("fs.defaultFS"));
-        Assert.assertEquals(write.getVersion(), "0.20.2");
-
-        Interface workflow = ClusterHelper.getInterface(cluster, Interfacetype.WORKFLOW);
-        Assert.assertEquals(workflow.getEndpoint(), "http://localhost:11000/oozie/");
-        Assert.assertEquals(workflow.getVersion(), "4.0");
-
-        Assert.assertEquals(ClusterHelper.getLocation(cluster, ClusterLocationType.STAGING).getPath(),
-                CLUSTER_LOCATIONS_BASE_DIR + "staging0");
-        Assert.assertEquals(ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(),
-                CLUSTER_LOCATIONS_BASE_DIR + "working0");
-
-        StringWriter stringWriter = new StringWriter();
-        Marshaller marshaller = EntityType.CLUSTER.getMarshaller();
-        marshaller.marshal(cluster, stringWriter);
-        System.out.println(stringWriter.toString());
-
-        Interface catalog = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY);
-        Assert.assertEquals(catalog.getEndpoint(), "http://localhost:48080/templeton/v1");
-        Assert.assertEquals(catalog.getVersion(), "0.11.0");
-
-
-    }
-
-    @Test
-    public void testParseClusterWithoutRegistry() throws IOException, FalconException, JAXBException {
-
-        StartupProperties.get().setProperty(CatalogServiceFactory.CATALOG_SERVICE, "thrift://localhost:9083");
-        Assert.assertTrue(CatalogServiceFactory.isEnabled());
-
-        InputStream stream = this.getClass().getResourceAsStream("/config/cluster/cluster-no-registry.xml");
-        Cluster cluster = parser.parse(stream);
-
-        Interface catalog = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY);
-        Assert.assertNull(catalog);
-
-        StartupProperties.get().remove(CatalogServiceFactory.CATALOG_SERVICE);
-        Assert.assertFalse(CatalogServiceFactory.isEnabled());
-
-        catalog = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY);
-        Assert.assertNull(catalog);
-    }
-
-    @Test
-    public void testParseClusterWithoutMessaging() throws FalconException {
-        InputStream stream = this.getClass().getResourceAsStream("/config/cluster/cluster-no-messaging.xml");
-
-        // Parse should be successful
-        Cluster cluster = parser.parse(stream);
-
-        Interface messaging = ClusterHelper.getInterface(cluster, Interfacetype.MESSAGING);
-        Assert.assertNull(messaging);
-
-        Assert.assertEquals(ClusterHelper.getMessageBrokerUrl(cluster), ClusterHelper.NO_USER_BROKER_URL);
-    }
-
-    @Test(expectedExceptions = ValidationException.class,
-            expectedExceptionsMessageRegExp = ".*java.net.UnknownHostException.*")
-    public void testParseClusterWithBadWriteInterface() throws Exception {
-        InputStream stream = this.getClass().getResourceAsStream("/config/cluster/cluster-bad-write-endpoint.xml");
-        Cluster cluster = parser.parse(stream);
-        parser.validate(cluster);
-    }
-
-    @Test
-    public void testParseClusterWithBadRegistry() throws Exception {
-        // disable catalog service
-        StartupProperties.get().remove(CatalogServiceFactory.CATALOG_SERVICE);
-        Assert.assertFalse(CatalogServiceFactory.isEnabled());
-
-        InputStream stream = this.getClass().getResourceAsStream("/config/cluster/cluster-bad-registry.xml");
-        Cluster cluster = parser.parse(stream);
-
-        Interface catalog = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY);
-        Assert.assertEquals(catalog.getEndpoint(), "Hcat");
-        Assert.assertEquals(catalog.getVersion(), "0.1");
-    }
-
-    @Test
-    public void testValidateClusterProperties() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        InputStream stream = this.getClass().getResourceAsStream("/config/cluster/cluster-0.1.xml");
-        Cluster cluster = parser.parse(stream);
-
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateLocations(cluster);
-
-        // Good set of properties, should work
-        clusterEntityParser.validateProperties(cluster);
-
-        // add duplicate property, should throw validation exception.
-        Property property1 = new Property();
-        property1.setName("field1");
-        property1.setValue("any value");
-        cluster.getProperties().getProperties().add(property1);
-        try {
-            clusterEntityParser.validate(cluster);
-            Assert.fail(); // should not reach here
-        } catch (ValidationException e) {
-            // Do nothing
-        }
-
-        // Remove duplicate property. It should not throw exception anymore
-        cluster.getProperties().getProperties().remove(property1);
-        clusterEntityParser.validateProperties(cluster);
-
-        // add empty property name, should throw validation exception.
-        property1.setName("");
-        cluster.getProperties().getProperties().add(property1);
-        try {
-            clusterEntityParser.validateProperties(cluster);
-            Assert.fail(); // should not reach here
-        } catch (ValidationException e) {
-            // Do nothing
-        }
-
-    }
-
-    /**
-     * A positive test for validating tags key value pair regex: key=value, key=value.
-     * @throws FalconException
-     */
-    @Test
-    public void testClusterTags() throws FalconException {
-        InputStream stream = this.getClass().getResourceAsStream(CLUSTER_XML);
-        Cluster cluster = parser.parse(stream);
-
-        final String tags = cluster.getTags();
-        Assert.assertEquals("consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting", tags);
-
-        final String[] keys = {"consumer", "owner", "_department_type", };
-        final String[] values = {"consumer@xyz.com", "producer@xyz.com", "forecasting", };
-
-        final String[] pairs = tags.split(",");
-        Assert.assertEquals(3, pairs.length);
-        for (int i = 0; i < pairs.length; i++) {
-            String pair = pairs[i].trim();
-            String[] parts = pair.split("=");
-            Assert.assertEquals(keys[i], parts[0]);
-            Assert.assertEquals(values[i], parts[1]);
-        }
-    }
-
-    @Test
-    public void testValidateACLWithNoACLAndAuthorizationEnabled() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(CLUSTER_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            ClusterEntityParser clusterEntityParser =
-                    (ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER);
-            Cluster cluster = clusterEntityParser.parse(stream);
-            Assert.assertNotNull(cluster);
-            Assert.assertNull(cluster.getACL());
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test
-    public void testValidateACLAuthorizationEnabled() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream("/config/cluster/cluster-no-registry.xml");
-
-            // need a new parser since it caches authorization enabled flag
-            ClusterEntityParser clusterEntityParser =
-                    (ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER);
-            Cluster cluster = clusterEntityParser.parse(stream);
-            Assert.assertNotNull(cluster);
-            Assert.assertNotNull(cluster.getACL());
-            Assert.assertNotNull(cluster.getACL().getOwner());
-            Assert.assertNotNull(cluster.getACL().getGroup());
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    /**
-     * A lightweight unit test for a cluster where location type staging is missing.
-     * Extensive tests are found in ClusterEntityValidationIT.
-     *
-     * @throws ValidationException
-     */
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = ".*Unable to find.*")
-    public void testClusterWithoutStaging() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        Locations locations = getClusterLocations(null, "non/existent/path");
-        cluster.setLocations(locations);
-        clusterEntityParser.validate(cluster);
-        Assert.fail("Should have thrown a validation exception");
-    }
-
-    /**
-     * A lightweight unit test for a cluster where location paths are invalid.
-     * Extensive tests are found in ClusterEntityValidationIT.
-     *
-     * @throws ValidationException
-     */
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = ".*Location.*must exist.")
-    public void testClusterWithInvalidLocationsPaths() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        Cluster cluster = (Cluster)this.dfsCluster.getCluster().copy();
-        Locations locations = getClusterLocations("non/existent/path", null);
-        cluster.setLocations(locations);
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        clusterEntityParser.validate(cluster);
-        Assert.fail("Should have thrown a validation exception");
-    }
-
-    /**
-     * A lightweight unit test for a cluster where location paths are same.
-     * Extensive tests are found in ClusterEntityValidationIT.
-     *
-     * @throws ValidationException
-     */
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = ".*same path:.*")
-    public void testClusterWithSameWorkingAndStaging() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
-        Locations locations = getClusterLocations("staging1", "staging1");
-        cluster.setLocations(locations);
-        this.dfsCluster.getFileSystem().mkdirs(new Path(cluster.getLocations().getLocations().get(0).getPath()),
-                HadoopClientFactory.ALL_PERMISSION);
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        clusterEntityParser.validate(cluster);
-        Assert.fail("Should have thrown a validation exception");
-    }
-
-    /**
-     * A lightweight unit test for a cluster where location type working is missing.
-     * It should automatically get generated
-     * Extensive tests are found in ClusterEntityValidationIT.
-     */
-    @Test
-    public void testClusterWithOnlyStaging() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
-        Locations locations = getClusterLocations("staging2", null);
-        cluster.setLocations(locations);
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        this.dfsCluster.getFileSystem().mkdirs(new Path(ClusterHelper.getLocation(cluster,
-                ClusterLocationType.STAGING).getPath()), HadoopClientFactory.ALL_PERMISSION);
-        clusterEntityParser.validate(cluster);
-        String workingDirPath = cluster.getLocations().getLocations().get(0).getPath() + "/working";
-        Assert.assertEquals(ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(), workingDirPath);
-        FileStatus workingDirStatus = this.dfsCluster.getFileSystem().getFileLinkStatus(new Path(workingDirPath));
-        Assert.assertTrue(workingDirStatus.isDirectory());
-        Assert.assertEquals(workingDirStatus.getPermission(), HadoopClientFactory.READ_EXECUTE_PERMISSION);
-        Assert.assertEquals(workingDirStatus.getOwner(), UserGroupInformation.getLoginUser().getShortUserName());
-
-        String stagingSubdirFeed = cluster.getLocations().getLocations().get(0).getPath() + "/falcon/workflows/feed";
-        String stagingSubdirProcess =
-                cluster.getLocations().getLocations().get(0).getPath() + "/falcon/workflows/process";
-        FileStatus stagingSubdirFeedStatus =
-                this.dfsCluster.getFileSystem().getFileLinkStatus(new Path(stagingSubdirFeed));
-        FileStatus stagingSubdirProcessStatus =
-                this.dfsCluster.getFileSystem().getFileLinkStatus(new Path(stagingSubdirProcess));
-        Assert.assertTrue(stagingSubdirFeedStatus.isDirectory());
-        Assert.assertEquals(stagingSubdirFeedStatus.getPermission(), HadoopClientFactory.ALL_PERMISSION);
-        Assert.assertTrue(stagingSubdirProcessStatus.isDirectory());
-        Assert.assertEquals(stagingSubdirProcessStatus.getPermission(), HadoopClientFactory.ALL_PERMISSION);
-    }
-
-    /**
-     * A lightweight unit test for a cluster where location working is not there and staging
-     * has a subdir which will be used by cluster as working.
-     * Checking for wrong perms of this subdir
-     * Extensive tests are found in ClusterEntityValidationIT.
-     *
-     * @throws ValidationException
-     */
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = ".*rwxr-xr-x.*rwxrwxrwx")
-    public void testClusterWithSubdirInStaging() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
-        Locations locations = getClusterLocations("staging3", null);
-        cluster.setLocations(locations);
-
-        HadoopClientFactory.mkdirs(this.dfsCluster.getFileSystem(),
-                new Path(cluster.getLocations().getLocations().get(0).getPath()),
-                HadoopClientFactory.ALL_PERMISSION);
-        HadoopClientFactory.mkdirs(this.dfsCluster.getFileSystem(),
-                new Path(cluster.getLocations().getLocations().get(0).getPath() + "/working"),
-                HadoopClientFactory.ALL_PERMISSION);
-
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        clusterEntityParser.validate(cluster);
-        Assert.fail("Should have thrown a validation exception");
-    }
-
-    /**
-     * A lightweight unit test for a cluster where staging location
-     * does not have ALL_PERMISSION (777).
-     * Staging has permission less than ALL_PERMISSION
-     * ValidationException should be thrown
-     *
-     * @throws ValidationException
-     */
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = ".*rwxr-xr-x.*rwxrwxrwx")
-    public void testClusterWithStagingPermission() throws Exception {
-        ClusterEntityParser clusterEntityParser = Mockito
-                .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
-        Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
-        Locations locations = getClusterLocations("staging4", null);
-        cluster.setLocations(locations);
-        Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
-        Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
-        this.dfsCluster.getFileSystem().mkdirs(new Path(ClusterHelper.getLocation(cluster,
-                ClusterLocationType.STAGING).getPath()), HadoopClientFactory.READ_EXECUTE_PERMISSION);
-        clusterEntityParser.validate(cluster);
-        Assert.fail("Should have thrown a validation exception");
-    }
-
-    @BeforeClass
-    public void init() throws Exception {
-        this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
-        this.conf = dfsCluster.getConf();
-        this.dfsCluster.getFileSystem().mkdirs(new Path(CLUSTER_LOCATIONS_BASE_DIR));
-    }
-
-    @AfterClass
-    public void tearDown() throws IOException {
-        this.dfsCluster.getFileSystem().delete(new Path(CLUSTER_LOCATIONS_BASE_DIR), true);
-        this.dfsCluster.shutdown();
-    }
-
-    private Locations getClusterLocations(String staging, String working) {
-        Locations locations = new Locations();
-
-        Location loc = new Location();
-        loc.setName(ClusterLocationType.STAGING);
-        if (StringUtils.isNotEmpty(staging)) {
-            loc.setPath(CLUSTER_LOCATIONS_BASE_DIR + staging);
-            locations.getLocations().add(loc);
-        }
-
-        loc = new Location();
-        loc.setName(ClusterLocationType.WORKING);
-        if (StringUtils.isNotEmpty(working)) {
-            loc.setPath(CLUSTER_LOCATIONS_BASE_DIR + working);
-            locations.getLocations().add(loc);
-        }
-
-        return locations;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/parser/DatasourceEntityParserTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/parser/DatasourceEntityParserTest.java b/common/src/test/java/org/apache/falcon/entity/parser/DatasourceEntityParserTest.java
deleted file mode 100644
index 3893917..0000000
--- a/common/src/test/java/org/apache/falcon/entity/parser/DatasourceEntityParserTest.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.InputStream;
-
-/**
- * Test class for Datasource Entity.
- */
-public class DatasourceEntityParserTest extends AbstractTestBase {
-
-    private EmbeddedCluster cluster;
-    private String hdfsUrl;
-
-    private final DatasourceEntityParser datasourceEntityParser =
-            (DatasourceEntityParser) EntityParserFactory.getParser(EntityType.DATASOURCE);
-    private final FeedEntityParser feedEntityParser =
-            (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-
-    @BeforeClass
-    public void start() throws Exception {
-        cluster = EmbeddedCluster.newCluster("test");
-        hdfsUrl = cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY);
-    }
-
-    @AfterClass
-    public void close() throws Exception {
-        cluster.shutdown();
-    }
-
-    @BeforeMethod
-    public void setup() throws Exception {
-        cleanupStore();
-    }
-
-    @Test
-    public void testDatasourceEntity() throws Exception {
-
-        InputStream stream = this.getClass().getResourceAsStream("/config/datasource/datasource-0.1.xml");
-        Datasource datasource = datasourceEntityParser.parse(stream);
-
-        ConfigurationStore store = ConfigurationStore.get();
-        store.publish(EntityType.DATASOURCE, datasource);
-
-        Datasource databaseEntity = EntityUtil.getEntity(EntityType.DATASOURCE, datasource.getName());
-        Assert.assertEquals("test-hsql-db", datasource.getName());
-        Assert.assertEquals("test-hsql-db", databaseEntity.getName());
-        Assert.assertEquals("hsql", databaseEntity.getType().value());
-        Assert.assertEquals("org.hsqldb.jdbcDriver", databaseEntity.getDriver().getClazz());
-    }
-
-    @Test
-    public void testDatasourcePasswordFileEntity() throws Exception {
-
-        InputStream stream = this.getClass().getResourceAsStream("/config/datasource/datasource-file-0.1.xml");
-        Datasource datasource = datasourceEntityParser.parse(stream);
-        ConfigurationStore store = ConfigurationStore.get();
-        store.publish(EntityType.DATASOURCE, datasource);
-
-        Datasource databaseEntity = EntityUtil.getEntity(EntityType.DATASOURCE, datasource.getName());
-        Assert.assertEquals("test-hsql-db", datasource.getName());
-        Assert.assertEquals("test-hsql-db", databaseEntity.getName());
-        Assert.assertEquals("hsql", databaseEntity.getType().value());
-        Assert.assertEquals("org.hsqldb.jdbcDriver", databaseEntity.getDriver().getClazz());
-    }
-}


[47/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java
deleted file mode 100644
index 4f86d9b..0000000
--- a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java
+++ /dev/null
@@ -1,466 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.client;
-
-import org.apache.falcon.LifeCycle;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.resource.APIResult;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.resource.EntitySummaryResult;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.falcon.resource.InstanceDependencyResult;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
-import java.util.List;
-
-/**
- * Abstract Client API to submit and manage Falcon Entities (Cluster, Feed, Process) jobs
- * against a Falcon instance.
- */
-public abstract class AbstractFalconClient {
-
-    //SUSPEND CHECKSTYLE CHECK ParameterNumberCheck
-
-    protected static final String FALCON_INSTANCE_ACTION_CLUSTERS = "falcon.instance.action.clusters";
-    protected static final String FALCON_INSTANCE_SOURCE_CLUSTERS = "falcon.instance.source.clusters";
-
-    /**
-     * Submit a new entity. Entities can be of type feed, process or data end
-     * points. Entity definitions are validated structurally against schema and
-     * subsequently for other rules before they are admitted into the system.
-     * @param entityType Entity type. Valid options are cluster, feed or process.
-     * @param filePath Path for the entity definition
-     * @param doAsUser Proxy user.
-     * @return Result of the submit command.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult submit(String entityType, String filePath, String doAsUser) throws FalconCLIException,
-            IOException;
-
-    /**
-     * Schedules a submitted entity immediately.
-     * @param entityType Entity type. Valid options are cluster, feed or process.
-     * @param entityName Name of the entity.
-     * @param colo Colo on which the query should be run.
-     * @return Result of the schedule command.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult schedule(EntityType entityType, String entityName, String colo, Boolean skipDryRun,
-                                        String doAsuser, String properties) throws FalconCLIException;
-
-    /**
-     * Delete the specified entity.
-     * @param entityType Entity type. Valid options are cluster, feed or process.
-     * @param entityName Name of the entity.
-     * @param doAsUser Proxy User.
-     * @return Result of the delete operation.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult delete(EntityType entityType, String entityName,
-                                     String doAsUser) throws FalconCLIException;
-
-    /**
-     * Validates the submitted entity.
-     * @param entityType Entity type. Valid options are cluster, feed or process.
-     * @param filePath Path for the entity definition to validate.
-     * @param skipDryRun Dry run.
-     * @param doAsUser Proxy User.
-     * @return Result of the validation.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult validate(String entityType, String filePath, Boolean skipDryRun,
-                                       String doAsUser) throws FalconCLIException;
-
-    /**
-     * Updates the submitted entity.
-     * @param entityType Entity type. Valid options are cluster, feed or process.
-     * @param entityName Name of the entity.
-     * @param filePath Path for the entity definition to update.
-     * @param skipDryRun Dry run.
-     * @param doAsUser Proxy User.
-     * @return Result of the update command.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult update(String entityType, String entityName, String filePath,
-                                                       Boolean skipDryRun, String doAsUser) throws FalconCLIException;
-
-    /**
-     * Get definition of the entity.
-     * @param entityType Entity type. Valid options are cluster, feed or process.
-     * @param entityName Name of the entity.
-     * @param doAsUser Proxy user.
-     * @return Definition of the entity.
-     * @throws FalconCLIException
-     */
-    public abstract Entity getDefinition(String entityType, String entityName,
-                                         String doAsUser) throws FalconCLIException;
-
-
-
-    /**
-     * Get status of instance(s) of an entity.
-     * @param type entity type
-     * @param entity entity name
-     * @param start start time
-     * @param end end time
-     * @param colo colo name
-     * @param lifeCycles lifecycle of an entity (for ex : feed has replication, eviction).
-     * @param filterBy filter operation that can be applied to the results
-     * @param orderBy field by which results should be ordered
-     * @param sortOrder sort order, can be asc or desc
-     * @param offset offset while displaying results
-     * @param numResults number of results to output
-     * @param doAsUser proxy user
-     * @param allAttempts to get the instances corresponding to each run-id
-     * @return Status of the instances over the specified time range.
-     * @throws FalconCLIException
-     */
-    public abstract InstancesResult getStatusOfInstances(String type, String entity, String start, String end, String
-            colo, List<LifeCycle> lifeCycles, String filterBy, String orderBy, String sortOrder, Integer offset, Integer
-            numResults, String doAsUser, Boolean allAttempts) throws FalconCLIException;
-
-    /**
-     * Suspend an entity.
-     * @param entityType Valid options are feed or process.
-     * @param entityName Name of the entity.
-     * @param colo Colo on which the query should be run.
-     * @param doAsUser proxy user
-     * @return Status of the entity.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult suspend(EntityType entityType, String entityName, String colo, String doAsUser) throws
-            FalconCLIException;
-
-    /**
-     * Resume a suspended entity.
-     * @param entityType Valid options are feed or process.
-     * @param entityName Name of the entity.
-     * @param colo Colo on which the query should be run.
-     * @param doAsUser proxy user
-     * @return Result of the resume command.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult resume(EntityType entityType, String entityName, String colo, String doAsUser) throws
-            FalconCLIException;
-
-    /**
-     * Get status of the entity.
-     * @param entityType Valid options are feed or process.
-     * @param entityName Name of the entity.
-     * @param colo Colo on which the query should be run.
-     * @param doAsUser proxy user
-     * @param showScheduler whether the call should return the scheduler on which the entity is scheduled.
-     * @return Status of the entity.
-     * @throws FalconCLIException
-     */
-    public abstract APIResult getStatus(EntityType entityType, String entityName, String colo, String doAsUser,
-                                        boolean showScheduler) throws FalconCLIException;
-
-    /**
-     * Submits and schedules an entity.
-     * @param entityType Valid options are feed or process.
-     * @param filePath Path for the entity definition
-     * @param skipDryRun Optional query param, Falcon skips oozie dryrun when value is set to true.
-     * @param doAsUser proxy user
-     * @return Result of the submit and schedule command.
-     */
-    public abstract APIResult submitAndSchedule(String entityType, String filePath, Boolean skipDryRun, String doAsUser,
-                                       String properties) throws FalconCLIException;
-
-    /**
-     *
-     * Get list of the entities.
-     * We have two filtering parameters for entity tags: "tags" and "tagkeys".
-     * "tags" does the exact match in key=value fashion, while "tagkeys" finds all the entities with the given key as a
-     * substring in the tags. This "tagkeys" filter is meant for users who don't remember the exact tag but only
-     * some of its keywords. It also saves users the time of typing long tags.
-     * The returned entities will match all the filtering criteria.
-     * @param entityType Comma-separated entity types. Can be empty. Valid entity types are cluster, feed or process.
-     * @param fields <optional param> Fields of entity that the user wants to view, separated by commas.
-     *               Valid options are STATUS, TAGS, PIPELINES, CLUSTERS.
-     * @param nameSubsequence <optional param> Subsequence of entity name. Not case sensitive.
-     *                        The entity name needs to contain all the characters in the subsequence in the same order.
-     *                        Example 1: "sample1" will match the entity named "SampleFeed1-2".
-     *                        Example 2: "mhs" will match the entity named "New-My-Hourly-Summary".
-     * @param tagKeywords <optional param> Keywords in tags, separated by comma. Not case sensitive.
-     *                    The returned entities will have tags that match all the tag keywords.
-     * @param filterTags <optional param> Return list of entities that have specified tags, separated by a comma.
-     *             Query will do AND on tag values.
-     *             Example: tags=consumer=consumer@xyz.com,owner=producer@xyz.com
-     * @param filterBy <optional param> Filter results by list of field:value pairs.
-     *                 Example: filterBy=STATUS:RUNNING,PIPELINES:clickLogs
-     *                 Supported filter fields are NAME, STATUS, PIPELINES, CLUSTER.
-     *                 Query will do an AND among filterBy fields.
-     * @param orderBy <optional param> Field by which results should be ordered.
-     *                Supports ordering by "name".
-     * @param sortOrder <optional param> Valid options are "asc" and "desc"
-     * @param offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-     * @param numResults <optional param> Number of results to show per request, used for pagination. Only
-     *                       integers > 0 are valid, Default is 10.
-     * @param doAsUser proxy user
-     * @return Total number of results and a list of entities.
-     */
-    public abstract EntityList getEntityList(String entityType, String fields, String nameSubsequence, String
-            tagKeywords, String filterBy, String filterTags, String orderBy, String sortOrder, Integer offset, Integer
-            numResults, String doAsUser) throws FalconCLIException;
-
-    /**
-     * Given an EntityType and cluster, get list of entities along with summary of N recent instances of each entity.
-     * @param entityType Valid options are feed or process.
-     * @param cluster Show entities that belong to this cluster.
-     * @param start <optional param> Show entity summaries from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *                 By default, it is set to (end - 2 days).
-     * @param end <optional param> Show entity summary up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *               Default is set to now.
-     * @param fields <optional param> Fields of entity that the user wants to view, separated by commas.
-     *                     Valid options are STATUS, TAGS, PIPELINES.
-     * @param filterBy <optional param> Filter results by list of field:value pairs.
-     *                     Example: filterBy=STATUS:RUNNING,PIPELINES:clickLogs
-     *                     Supported filter fields are NAME, STATUS, PIPELINES, CLUSTER.
-     *                     Query will do an AND among filterBy fields.
-     * @param filterTags <optional param> Return list of entities that have specified tags, separated by a comma.
-     *                   Query will do AND on tag values.
-     *                   Example: tags=consumer=consumer@xyz.com,owner=producer@xyz.com
-     * @param orderBy <optional param> Field by which results should be ordered.
-     *                      Supports ordering by "name".
-     * @param sortOrder <optional param> Valid options are "asc" and "desc"
-     * @param offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-     * @param numResults <optional param> Number of results to show per request, used for pagination. Only
-     *                    integers > 0 are valid. Default is 10.
-     * @param numInstances <optional param> Number of recent instances to show per entity. Only integers > 0 are
-     *                           valid. Default is 7.
-     * @param doAsUser proxy user
-     * @return Show entities along with summary of N instances for each entity.
-     */
-    public abstract EntitySummaryResult getEntitySummary(String entityType, String cluster, String start, String end,
-                                                         String fields, String filterBy, String filterTags, String
-                                                         orderBy, String sortOrder, Integer offset, Integer
-                                                         numResults, Integer numInstances, String doAsUser) throws
-            FalconCLIException;
-
-    /**
-     * Force updates the entity.
-     * @param entityType Valid options are feed or process.
-     * @param entityName Name of the entity.
-     * @param colo Colo on which the query should be run.
-     * @param skipDryRun Optional query param, Falcon skips oozie dryrun when value is set to true.
-     * @param doAsUser proxy user
-     * @return Result of the validation.
-     */
-    public abstract APIResult touch(String entityType, String entityName, String colo, Boolean skipDryRun,
-                                    String doAsUser) throws FalconCLIException;
-
-    /**
-     * Kill currently running instance(s) of an entity.
-     * @param type Valid options are feed or process.
-     * @param entity name of the entity.
-     * @param start start time of the instance(s) that you want to refer to
-     * @param end end time of the instance(s) that you want to refer to
-     * @param colo Colo on which the query should be run.
-     * @param lifeCycles <optional param> can be Eviction/Replication(default) for feed and Execution(default) for
-     *                   process.
-     * @param doAsUser proxy user
-     * @return Result of the kill operation.
-     */
-    public abstract InstancesResult killInstances(String type, String entity, String start, String end, String colo,
-                                                  String clusters, String sourceClusters, List<LifeCycle> lifeCycles,
-                                                  String doAsUser) throws FalconCLIException,
-            UnsupportedEncodingException;
-
-    /**
-     * Suspend instances of an entity.
-     * @param type Valid options are feed or process.
-     * @param entity name of the entity.
-     * @param start the start time of the instance(s) that you want to refer to
-     * @param end the end time of the instance(s) that you want to refer to
-     * @param colo Colo on which the query should be run.
-     * @param lifeCycles <optional param> can be Eviction/Replication(default) for feed and Execution(default) for
-     *                   process.
-     * @param doAsUser proxy user
-     * @return Results of the suspend command.
-     */
-    public abstract InstancesResult suspendInstances(String type, String entity, String start, String end, String colo,
-                                            String clusters, String sourceClusters, List<LifeCycle> lifeCycles,
-                                            String doAsUser) throws FalconCLIException, UnsupportedEncodingException;
-
-    /**
-     * Resume suspended instances of an entity.
-     * @param type Valid options are feed or process.
-     * @param entity name of the entity.
-     * @param start start time of the instance(s) that you want to refer to
-     * @param end the end time of the instance(s) that you want to refer to
-     * @param colo Colo on which the query should be run.
-     * @param lifeCycles <optional param> can be Eviction/Replication(default) for feed and Execution(default) for
-     *                   process.
-     * @param doAsUser proxy user
-     * @return Results of the resume command.
-     */
-    public abstract InstancesResult resumeInstances(String type, String entity, String start, String end, String colo,
-                                           String clusters, String sourceClusters, List<LifeCycle> lifeCycles,
-                                           String doAsUser) throws FalconCLIException, UnsupportedEncodingException;
-
-    /**
-     * Rerun instances of an entity. On issuing a rerun, by default the execution resumes from the last failed node in
-     * the workflow.
-     * @param type Valid options are feed or process.
-     * @param entity name of the entity.
-     * @param start start is the start time of the instance that you want to refer to
-     * @param end end is the end time of the instance that you want to refer to
-     * @param colo Colo on which the query should be run.
-     * @param lifeCycles <optional param> can be Eviction/Replication(default) for feed and Execution(default) for
-     *                   process.
-     * @param isForced <optional param> can be used to forcefully rerun the entire instance.
-     * @param doAsUser proxy user
-     * @return Results of the rerun command.
-     */
-    public abstract InstancesResult rerunInstances(String type, String entity, String start, String end,
-                                                   String filePath, String colo, String clusters,
-                                                   String sourceClusters, List<LifeCycle> lifeCycles, Boolean isForced,
-                                                   String doAsUser) throws FalconCLIException, IOException;
-
-    /**
-     * Get summary of instance/instances of an entity.
-     * @param type Valid options are cluster, feed or process.
-     * @param entity Name of the entity.
-     * @param start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *                 By default, it is set to (end - (10 * entityFrequency)).
-     * @param end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *               Default is set to now.
-     * @param colo <optional param> Colo on which the query should be run.
-     * @param lifeCycles <optional param> Valid lifecycles for feed are Eviction/Replication(default) and for process
-     *                   is Execution(default).
-     * @param filterBy <optional param> Filter results by list of field:value pairs.
-     *                 Example1: filterBy=STATUS:RUNNING,CLUSTER:primary-cluster
-     *                 Example2: filterBy=Status:RUNNING,Status:KILLED
-     *                 Supported filter fields are STATUS, CLUSTER.
-     *                 Query will do an AND among filterBy fields.
-     * @param orderBy <optional param> Field by which results should be ordered.
-     *                Supports ordering by "cluster". Example: orderBy=cluster
-     * @param sortOrder <optional param> Valid options are "asc" and "desc". Example: sortOrder=asc
-     * @param doAsUser proxy user
-     * @return Summary of the instances over the specified time range
-     */
-    public abstract InstancesSummaryResult getSummaryOfInstances(String type, String entity, String start, String end,
-                                                                 String colo, List<LifeCycle> lifeCycles,
-                                                                 String filterBy, String orderBy, String sortOrder,
-                                                                 String doAsUser) throws FalconCLIException;
-
-    /**
-     * Get falcon feed instance availability.
-     * @param type The only valid option is feed.
-     * @param entity Name of the entity.
-     * @param start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *              By default, it is set to (end - (10 * entityFrequency)).
-     * @param end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *            Default is set to now.
-     * @param colo Colo on which the query should be run.
-     * @param doAsUser proxy user
-     * @return Feed instance availability status
-     */
-    public abstract FeedInstanceResult getFeedListing(String type, String entity, String start, String end, String colo,
-                                                      String doAsUser) throws FalconCLIException;
-
-    /**
-     * Get log of a specific instance of an entity.
-     * @param type Valid options are cluster, feed or process.
-     * @param entity Name of the entity.
-     * @param start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *                 By default, it is set to (end - (10 * entityFrequency)).
-     * @param end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-     *               Default is set to now.
-     * @param colo <optional param> Colo on which the query should be run.
-     * @param runId <optional param> Run Id.
-     * @param lifeCycles <optional param> Valid lifecycles for feed are Eviction/Replication(default) and for process is
-     *                   Execution(default).
-     * @param filterBy <optional param> Filter results by list of field:value pairs.
-     *                 Example: filterBy=STATUS:RUNNING,CLUSTER:primary-cluster
-     *                 Supported filter fields are STATUS, CLUSTER, SOURCECLUSTER, STARTEDAFTER.
-     *                 Query will do an AND among filterBy fields.
-     * @param orderBy <optional param> Field by which results should be ordered.
-     *                Supports ordering by "status","startTime","endTime","cluster".
-     * @param sortOrder <optional param> Valid options are "asc" and "desc"
-     * @param offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-     * @param numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0
-     *                   are valid, Default is 10.
-     * @param doAsUser proxy user
-     * @return Log of specified instance.
-     */
-    public abstract InstancesResult getLogsOfInstances(String type, String entity, String start, String end,
-                                                       String colo, String runId, List<LifeCycle> lifeCycles,
-                                                       String filterBy, String orderBy, String sortOrder,
-                                                       Integer offset, Integer numResults, String doAsUser) throws
-            FalconCLIException;
-
-    //RESUME CHECKSTYLE CHECK ParameterNumberCheck
-
-    /**
-     * Get the params passed to the workflow for an instance of feed/process.
-     * @param type Valid options are cluster, feed or process.
-     * @param entity Name of the entity.
-     * @param start should be the nominal time of the instance for which you want the params to be returned
-     * @param colo <optional param> Colo on which the query should be run.
-     * @param lifeCycles <optional param> Valid lifecycles for feed are Eviction/Replication(default) and for process is
-     *                   Execution(default).
-     * @param doAsUser proxy user
-     * @return Params passed to the workflow for the given instance.
-     */
-    public abstract InstancesResult getParamsOfInstance(String type, String entity, String start, String colo,
-                                                        List<LifeCycle> lifeCycles, String doAsUser) throws
-            FalconCLIException, UnsupportedEncodingException;
-
-    /**
-     * Get dependent instances for a particular instance.
-     * @param entityType Valid options are feed or process.
-     * @param entityName Name of the entity
-     * @param instanceTime <mandatory param> time of the given instance
-     * @param colo Colo on which the query should be run.
-     * @return Dependent instances for the specified instance
-     */
-    public abstract InstanceDependencyResult getInstanceDependencies(String entityType, String entityName,
-                                                                     String instanceTime, String colo) throws
-            FalconCLIException;
-
-    /**
-     * Get version of the falcon server.
-     * @return Version of the server.
-     */
-    public abstract String getVersion(String doAsUser) throws FalconCLIException;
-
-    protected InputStream getServletInputStream(String clusters, String sourceClusters, String properties) throws
-            FalconCLIException, UnsupportedEncodingException {
-
-        InputStream stream;
-        StringBuilder buffer = new StringBuilder();
-        if (clusters != null) {
-            buffer.append(FALCON_INSTANCE_ACTION_CLUSTERS).append('=').append(clusters).append('\n');
-        }
-        if (sourceClusters != null) {
-            buffer.append(FALCON_INSTANCE_SOURCE_CLUSTERS).append('=').append(sourceClusters).append('\n');
-        }
-        if (properties != null) {
-            buffer.append(properties);
-        }
-        stream = new ByteArrayInputStream(buffer.toString().getBytes());
-        return (buffer.length() == 0) ? null : stream;
-    }
-}
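
For orientation, here is a hedged usage sketch for a concrete implementation of the abstract client above (for example the FalconClient shown further below). The server URL, entity names, and file path are placeholders, and passing null for an optional argument is assumed to mean "use the default"; this sketch is not taken from the Falcon documentation.

    import org.apache.falcon.client.AbstractFalconClient;
    import org.apache.falcon.client.FalconClient;
    import org.apache.falcon.resource.APIResult;
    import org.apache.falcon.resource.EntityList;
    import org.apache.falcon.resource.InstancesResult;

    public class FalconClientUsageSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder server URL.
            AbstractFalconClient client = new FalconClient("https://falcon.example.com:15443/");

            // Submit and schedule a process definition, skipping the Oozie dry run.
            APIResult result = client.submitAndSchedule(
                    "process", "/tmp/sample-process.xml", true, null, null);
            System.out.println(result.getMessage());

            // List feeds whose tags contain the keyword "clickLogs"; "tagkeys" is a
            // substring match, unlike the exact key=value "tags" filter described above.
            EntityList feeds = client.getEntityList("feed", "STATUS", null, "clickLogs",
                    null, null, "name", "asc", 0, 10, null);
            System.out.println(feeds);

            // Fetch the status of process instances for a one-day window.
            InstancesResult instances = client.getStatusOfInstances("process", "sample-process",
                    "2016-01-01T00:00Z", "2016-01-02T00:00Z", null, null, null, null, null,
                    0, 10, null, null);
            System.out.println(instances);
        }
    }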

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/client/FalconCLIException.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/client/FalconCLIException.java b/client/src/main/java/org/apache/falcon/client/FalconCLIException.java
deleted file mode 100644
index 29efbae..0000000
--- a/client/src/main/java/org/apache/falcon/client/FalconCLIException.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.client;
-
-import com.sun.jersey.api.client.ClientResponse;
-import org.apache.falcon.resource.APIResult;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Exception thrown by FalconClient.
- */
-public class FalconCLIException extends Exception {
-
-    private static final int MB = 1024 * 1024;
-
-    public FalconCLIException(String msg) {
-        super(msg);
-    }
-
-    public FalconCLIException(Throwable e) {
-        super(e);
-    }
-
-    public FalconCLIException(String msg, Throwable throwable) {
-        super(msg, throwable);
-    }
-
-    public static FalconCLIException fromReponse(ClientResponse clientResponse) {
-        ClientResponse.Status status = clientResponse.getClientResponseStatus();
-        String statusValue = status.toString();
-        String message = "";
-        if (status == ClientResponse.Status.BAD_REQUEST) {
-            clientResponse.bufferEntity();
-            InputStream in = clientResponse.getEntityInputStream();
-            try {
-                in.mark(MB);
-                message = clientResponse.getEntity(APIResult.class).getMessage();
-            } catch (Throwable th) {
-                byte[] data = new byte[MB];
-                try {
-                    in.reset();
-                    int len = in.read(data);
-                    message = new String(data, 0, len);
-                } catch (IOException e) {
-                    message = e.getMessage();
-                }
-            }
-        }
-        return new FalconCLIException(statusValue + ";" + message);
-    }
-}
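
A small, hedged sketch of the intended call pattern: when a Jersey response comes back with a non-2xx status, fromReponse turns the buffered entity into a readable exception. The helper class below is hypothetical; the client's real response handling lives in FalconClient, shown further below.

    import com.sun.jersey.api.client.ClientResponse;
    import org.apache.falcon.client.FalconCLIException;

    final class ResponseChecks {
        private ResponseChecks() {
        }

        /** Hypothetical helper: raise a FalconCLIException for any non-2xx response. */
        static void checkResponse(ClientResponse response) throws FalconCLIException {
            int status = response.getStatus();
            if (status < 200 || status >= 300) {
                throw FalconCLIException.fromReponse(response);
            }
        }
    }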

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/client/FalconClient.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/client/FalconClient.java b/client/src/main/java/org/apache/falcon/client/FalconClient.java
deleted file mode 100644
index 597f608..0000000
--- a/client/src/main/java/org/apache/falcon/client/FalconClient.java
+++ /dev/null
@@ -1,1057 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.client;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintStream;
-import java.io.UnsupportedEncodingException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.security.SecureRandom;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLSession;
-import javax.net.ssl.TrustManager;
-import javax.ws.rs.HttpMethod;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriBuilder;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.net.util.TrustManagerUtils;
-import org.apache.falcon.LifeCycle;
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.cli.FalconMetadataCLI;
-import org.apache.falcon.entity.v0.DateValidator;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.metadata.RelationshipType;
-import org.apache.falcon.recipe.RecipeTool;
-import org.apache.falcon.recipe.RecipeToolArgs;
-import org.apache.falcon.resource.APIResult;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.resource.EntitySummaryResult;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.falcon.resource.FeedLookupResult;
-import org.apache.falcon.resource.InstanceDependencyResult;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-import org.apache.falcon.resource.LineageGraphResult;
-import org.apache.falcon.resource.SchedulableEntityInstanceResult;
-import org.apache.falcon.resource.TriageResult;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-
-import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.client.urlconnection.HTTPSProperties;
-
-/**
- * Client API to submit and manage Falcon Entities (Cluster, Feed, Process) jobs
- * against a Falcon instance.
- */
-public class FalconClient extends AbstractFalconClient {
-
-    public static final AtomicReference<PrintStream> OUT = new AtomicReference<>(System.out);
-
-    public static final String WS_HEADER_PREFIX = "header:";
-    public static final String USER = System.getProperty("user.name");
-    public static final String AUTH_URL = "api/options?" + PseudoAuthenticator.USER_NAME + "=" + USER;
-
-
-
-    public static final String PATH = "path";
-    public static final String COLO = "colo";
-    private static final String KEY = "key";
-    private static final String VALUE = "value";
-    public static final String CLUSTER = "cluster";
-    public static final String RUN_ID = "runid";
-    public static final String FORCE = "force";
-    public static final String SHOW_SCHEDULER = "showScheduler";
-    public static final String ENTITY_NAME = "name";
-    public static final String SKIP_DRYRUN = "skipDryRun";
-    public static final String FILTER_BY = "filterBy";
-    public static final String ORDER_BY = "orderBy";
-    public static final String SORT_ORDER = "sortOrder";
-    public static final String OFFSET = "offset";
-    public static final String NUM_RESULTS = "numResults";
-    public static final String START = "start";
-    public static final String END = "end";
-    public static final String INSTANCE_TIME = "instanceTime";
-    public static final String PROPERTIES = "properties";
-    private static final String FIELDS = "fields";
-    private static final String NAME_SUBSEQUENCE = "nameseq";
-    private static final String FILTER_TAGS = "tags";
-    private static final String TAG_KEYWORDS = "tagkeys";
-    private static final String LIFECYCLE = "lifecycle";
-    private static final String NUM_INSTANCES = "numInstances";
-    public static final String ALL_ATTEMPTS = "allAttempts";
-
-
-
-
-    public static final String DO_AS_OPT = "doAs";
-    /**
-     * Name of the HTTP cookie used for the authentication token between the client and the server.
-     */
-    public static final String AUTH_COOKIE = "hadoop.auth";
-    private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
-
-    private static final KerberosAuthenticator AUTHENTICATOR = new KerberosAuthenticator();
-    private static final String TEMPLATE_SUFFIX = "-template.xml";
-
-
-    private static final String PROPERTIES_SUFFIX = ".properties";
-    public static final HostnameVerifier ALL_TRUSTING_HOSTNAME_VERIFIER = new HostnameVerifier() {
-        @Override
-        public boolean verify(String hostname, SSLSession sslSession) {
-            return true;
-        }
-    };
-    private final WebResource service;
-    private final AuthenticatedURL.Token authenticationToken;
-
-    /**
-     * debugMode=false means no debugging. debugMode=true means debugging on.
-     */
-    private boolean debugMode = false;
-
-    private final Properties clientProperties;
-
-    /**
-     * Create a Falcon client instance.
-     *
-     * @param falconUrl of the server to which client interacts
-     * @throws FalconCLIException - If unable to initialize SSL Props
-     */
-    public FalconClient(String falconUrl) throws FalconCLIException {
-        this(falconUrl, new Properties());
-    }
-
-    /**
-     * Create a Falcon client instance.
-     *
-     * @param falconUrl of the server to which client interacts
-     * @param properties client properties
-     * @throws FalconCLIException - If unable to initialize SSL Props
-     */
-    public FalconClient(String falconUrl, Properties properties) throws FalconCLIException {
-        try {
-            String baseUrl = notEmpty(falconUrl, "FalconUrl");
-            if (!baseUrl.endsWith("/")) {
-                baseUrl += "/";
-            }
-            this.clientProperties = properties;
-            SSLContext sslContext = getSslContext();
-            DefaultClientConfig config = new DefaultClientConfig();
-            config.getProperties().put(HTTPSProperties.PROPERTY_HTTPS_PROPERTIES,
-                    new HTTPSProperties(ALL_TRUSTING_HOSTNAME_VERIFIER, sslContext)
-            );
-            Client client = Client.create(config);
-            client.setConnectTimeout(Integer.parseInt(clientProperties.getProperty("falcon.connect.timeout",
-                    "180000")));
-            client.setReadTimeout(Integer.parseInt(clientProperties.getProperty("falcon.read.timeout", "180000")));
-            service = client.resource(UriBuilder.fromUri(baseUrl).build());
-            client.resource(UriBuilder.fromUri(baseUrl).build());
-            authenticationToken = getToken(baseUrl);
-        } catch (Exception e) {
-            throw new FalconCLIException("Unable to initialize Falcon Client object. Cause : " + e.getMessage(), e);
-        }
-    }
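
The constructor above reads the connect and read timeouts (default 180000 ms) from the client properties, so they can be overridden when the client is built. A brief, hedged sketch follows; the timeout values and URL are arbitrary examples.

    import java.util.Properties;

    import org.apache.falcon.client.FalconCLIException;
    import org.apache.falcon.client.FalconClient;

    public class FalconClientTimeoutSketch {
        public static void main(String[] args) throws FalconCLIException {
            Properties props = new Properties();
            props.setProperty("falcon.connect.timeout", "60000");  // 60 s connect timeout
            props.setProperty("falcon.read.timeout", "300000");    // 5 min read timeout
            FalconClient client = new FalconClient("https://falcon.example.com:15443/", props);
            System.out.println(client.getDebugMode());
        }
    }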
-
-    private static SSLContext getSslContext() throws Exception {
-        SSLContext sslContext = SSLContext.getInstance("SSL");
-        sslContext.init(
-                null,
-                new TrustManager[]{TrustManagerUtils.getValidateServerCertificateTrustManager()},
-                new SecureRandom());
-        return sslContext;
-    }
-
-    /**
-     * @return current debug Mode
-     */
-    public boolean getDebugMode() {
-        return debugMode;
-    }
-
-    /**
-     * Set debug mode.
-     *
-     * @param debugMode : debugMode=false means no debugging. debugMode=true means debugging on
-     */
-    public void setDebugMode(boolean debugMode) {
-        this.debugMode = debugMode;
-    }
-
-    public static AuthenticatedURL.Token getToken(String baseUrl) throws FalconCLIException {
-        AuthenticatedURL.Token currentToken = new AuthenticatedURL.Token();
-        try {
-            URL url = new URL(baseUrl + AUTH_URL);
-            // using KerberosAuthenticator which falls back to PseudoAuthenticator
-            // instead of passing authentication type from the command line - bad factory
-            HttpsURLConnection.setDefaultSSLSocketFactory(getSslContext().getSocketFactory());
-            HttpsURLConnection.setDefaultHostnameVerifier(ALL_TRUSTING_HOSTNAME_VERIFIER);
-            new AuthenticatedURL(AUTHENTICATOR).openConnection(url, currentToken);
-        } catch (Exception ex) {
-            throw new FalconCLIException("Could not authenticate, " + ex.getMessage(), ex);
-        }
-
-        return currentToken;
-    }
-
-
-    /**
-     * Methods allowed on Entity Resources.
-     */
-    protected static enum Entities {
-        VALIDATE("api/entities/validate/", HttpMethod.POST, MediaType.TEXT_XML),
-        SUBMIT("api/entities/submit/", HttpMethod.POST, MediaType.TEXT_XML),
-        UPDATE("api/entities/update/", HttpMethod.POST, MediaType.TEXT_XML),
-        SUBMITANDSCHEDULE("api/entities/submitAndSchedule/", HttpMethod.POST, MediaType.TEXT_XML),
-        SCHEDULE("api/entities/schedule/", HttpMethod.POST, MediaType.TEXT_XML),
-        SUSPEND("api/entities/suspend/", HttpMethod.POST, MediaType.TEXT_XML),
-        RESUME("api/entities/resume/", HttpMethod.POST, MediaType.TEXT_XML),
-        DELETE("api/entities/delete/", HttpMethod.DELETE, MediaType.TEXT_XML),
-        STATUS("api/entities/status/", HttpMethod.GET, MediaType.TEXT_XML),
-        DEFINITION("api/entities/definition/", HttpMethod.GET, MediaType.TEXT_XML),
-        LIST("api/entities/list/", HttpMethod.GET, MediaType.TEXT_XML),
-        SUMMARY("api/entities/summary", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        LOOKUP("api/entities/lookup/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        DEPENDENCY("api/entities/dependencies/", HttpMethod.GET, MediaType.TEXT_XML),
-        SLA("api/entities/sla-alert", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        TOUCH("api/entities/touch", HttpMethod.POST, MediaType.TEXT_XML);
-
-        private String path;
-        private String method;
-        private String mimeType;
-
-        Entities(String path, String method, String mimeType) {
-            this.path = path;
-            this.method = method;
-            this.mimeType = mimeType;
-        }
-    }
-
-    /**
-     * Methods allowed on Metadata Discovery Resources.
-     */
-    protected static enum MetadataOperations {
-
-        LIST("api/metadata/discovery/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        RELATIONS("api/metadata/discovery/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        VERTICES("api/metadata/lineage/vertices", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        EDGES("api/metadata/lineage/edges", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        LINEAGE("api/metadata/lineage/entities", HttpMethod.GET, MediaType.APPLICATION_JSON);
-
-        private String path;
-        private String method;
-        private String mimeType;
-
-        MetadataOperations(String path, String method, String mimeType) {
-            this.path = path;
-            this.method = method;
-            this.mimeType = mimeType;
-        }
-    }
-
-    /**
-     * Methods allowed on Process Instance Resources.
-     */
-    protected static enum Instances {
-        RUNNING("api/instance/running/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        STATUS("api/instance/status/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        LIST("api/instance/list", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        KILL("api/instance/kill/", HttpMethod.POST, MediaType.APPLICATION_JSON),
-        SUSPEND("api/instance/suspend/", HttpMethod.POST, MediaType.APPLICATION_JSON),
-        RESUME("api/instance/resume/", HttpMethod.POST, MediaType.APPLICATION_JSON),
-        RERUN("api/instance/rerun/", HttpMethod.POST, MediaType.APPLICATION_JSON),
-        LOG("api/instance/logs/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        SUMMARY("api/instance/summary/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        PARAMS("api/instance/params/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        DEPENDENCY("api/instance/dependencies/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        TRIAGE("api/instance/triage/", HttpMethod.GET, MediaType.APPLICATION_JSON),
-        LISTING("api/instance/listing/", HttpMethod.GET, MediaType.APPLICATION_JSON);
-
-        private String path;
-        private String method;
-        private String mimeType;
-
-        Instances(String path, String method, String mimeType) {
-            this.path = path;
-            this.method = method;
-            this.mimeType = mimeType;
-        }
-    }
-
-    protected static enum AdminOperations {
-
-        STACK("api/admin/stack", HttpMethod.GET, MediaType.TEXT_PLAIN),
-        VERSION("api/admin/version", HttpMethod.GET, MediaType.APPLICATION_JSON);
-
-        private String path;
-        private String method;
-        private String mimeType;
-
-        AdminOperations(String path, String method, String mimeType) {
-            this.path = path;
-            this.method = method;
-            this.mimeType = mimeType;
-        }
-    }
-
-    public String notEmpty(String str, String name) {
-        if (str == null) {
-            throw new IllegalArgumentException(name + " cannot be null");
-        }
-        if (str.length() == 0) {
-            throw new IllegalArgumentException(name + " cannot be empty");
-        }
-        return str;
-    }
-
-    public APIResult schedule(EntityType entityType, String entityName, String colo,
-                              Boolean skipDryRun, String doAsUser, String properties) throws FalconCLIException {
-        String type = entityType.toString().toLowerCase();
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.SCHEDULE.path, type, entityName)
-            .addQueryParam(COLO, colo).addQueryParam(SKIP_DRYRUN, skipDryRun)
-            .addQueryParam(PROPERTIES, properties).addQueryParam(DO_AS_OPT, doAsUser).call(Entities.SCHEDULE);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult suspend(EntityType entityType, String entityName, String colo, String doAsUser)
-        throws FalconCLIException {
-        String type = entityType.toString().toLowerCase();
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.SUSPEND.path, type, entityName)
-            .addQueryParam(COLO, colo).addQueryParam(DO_AS_OPT, doAsUser).call(Entities.SUSPEND);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult resume(EntityType entityType, String entityName, String colo, String doAsUser)
-        throws FalconCLIException {
-        String type = entityType.toString().toLowerCase();
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.RESUME.path, type, entityName)
-            .addQueryParam(COLO, colo).addQueryParam(DO_AS_OPT, doAsUser).call(Entities.RESUME);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult delete(EntityType entityType, String entityName, String doAsUser) throws FalconCLIException {
-        String type = entityType.toString().toLowerCase();
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.DELETE.path, type, entityName)
-            .addQueryParam(DO_AS_OPT, doAsUser).call(Entities.DELETE);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult validate(String entityType, String filePath, Boolean skipDryRun, String doAsUser)
-        throws FalconCLIException {
-        InputStream entityStream = getServletInputStream(filePath);
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.VALIDATE.path, entityType)
-            .addQueryParam(SKIP_DRYRUN, skipDryRun).addQueryParam(DO_AS_OPT, doAsUser)
-            .call(Entities.VALIDATE, entityStream);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult submit(String entityType, String filePath, String doAsUser)
-        throws FalconCLIException {
-        InputStream entityStream = getServletInputStream(filePath);
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.SUBMIT.path, entityType)
-            .addQueryParam(DO_AS_OPT, doAsUser).call(Entities.SUBMIT, entityStream);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult update(String entityType, String entityName, String filePath,
-                            Boolean skipDryRun, String doAsUser) throws FalconCLIException {
-        InputStream entityStream = getServletInputStream(filePath);
-        Entities operation = Entities.UPDATE;
-        ClientResponse clientResponse = new ResourceBuilder().path(operation.path, entityType, entityName)
-            .addQueryParam(SKIP_DRYRUN, skipDryRun).addQueryParam(DO_AS_OPT, doAsUser)
-            .call(operation, entityStream);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    @Override
-    public APIResult submitAndSchedule(String entityType, String filePath, Boolean skipDryRun,
-                                       String doAsUser, String properties) throws FalconCLIException {
-        InputStream entityStream = getServletInputStream(filePath);
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.SUBMITANDSCHEDULE.path, entityType)
-            .addQueryParam(SKIP_DRYRUN, skipDryRun).addQueryParam(DO_AS_OPT, doAsUser)
-            .addQueryParam(PROPERTIES, properties).call(Entities.SUBMITANDSCHEDULE, entityStream);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public APIResult getStatus(EntityType entityType, String entityName, String colo,
-                               String doAsUser, boolean showScheduler) throws FalconCLIException {
-        String type = entityType.toString().toLowerCase();
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.STATUS.path, type, entityName)
-            .addQueryParam(COLO, colo).addQueryParam(DO_AS_OPT, doAsUser)
-            .addQueryParam(SHOW_SCHEDULER, showScheduler).call(Entities.STATUS);
-
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public Entity getDefinition(String entityType, String entityName, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.DEFINITION.path, entityType, entityName)
-            .call(Entities.DEFINITION);
-        String entity = getResponseAsString(clientResponse);
-        return Entity.fromString(EntityType.getEnum(entityType), entity);
-    }
-
-    public EntityList getDependency(String entityType, String entityName, String doAsUser)
-        throws FalconCLIException {
-
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.DEPENDENCY.path, entityType, entityName)
-            .addQueryParam(DO_AS_OPT, doAsUser).call(Entities.DEPENDENCY);
-
-        printClientResponse(clientResponse);
-        checkIfSuccessful(clientResponse);
-
-        EntityList result = clientResponse.getEntity(EntityList.class);
-        if (result == null || result.getElements() == null) {
-            return null;
-        }
-        return result;
-    }
-
-    //SUSPEND CHECKSTYLE CHECK ParameterNumberCheck
-
-    public SchedulableEntityInstanceResult getFeedSlaMissPendingAlerts(String entityType, String entityName,
-                                           String startTime, String endTime, String colo) throws FalconCLIException {
-        ClientResponse clientResponse  = new ResourceBuilder().path(Entities.SLA.path, entityType)
-            .addQueryParam(START, startTime).addQueryParam(COLO, colo).addQueryParam(END, endTime)
-            .addQueryParam(ENTITY_NAME, entityName).call(Entities.SLA);
-        return getResponse(SchedulableEntityInstanceResult.class, clientResponse);
-    }
-
-    public TriageResult triage(String entityType, String entityName, String instanceTime,
-                               String colo) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.TRIAGE.path, entityType, entityName)
-            .addQueryParam(START, instanceTime).addQueryParam(COLO, colo).call(Instances.TRIAGE);
-        return getResponse(TriageResult.class, clientResponse);
-    }
-
-    @Override
-    public EntityList getEntityList(String entityType, String fields, String nameSubsequence, String tagKeywords,
-                                    String filterBy, String filterTags, String orderBy, String sortOrder,
-                                    Integer offset, Integer numResults, String doAsUser) throws FalconCLIException {
-        Entities operation = Entities.LIST;
-        ClientResponse clientResponse = new ResourceBuilder().path(operation.path, entityType)
-            .addQueryParam(DO_AS_OPT, doAsUser).addQueryParam(NUM_RESULTS, numResults)
-            .addQueryParam(OFFSET, offset).addQueryParam(SORT_ORDER, sortOrder)
-            .addQueryParam(ORDER_BY, orderBy).addQueryParam(FILTER_BY, filterBy)
-            .addQueryParam(FIELDS, fields).addQueryParam(NAME_SUBSEQUENCE, nameSubsequence)
-            .addQueryParam(TAG_KEYWORDS, tagKeywords).addQueryParam(FILTER_TAGS, filterTags)
-            .call(operation);
-
-        printClientResponse(clientResponse);
-        checkIfSuccessful(clientResponse);
-
-        EntityList result = clientResponse.getEntity(EntityList.class);
-        if (result == null || result.getElements() == null) {
-            return null;
-        }
-        return result;
-    }
-
-    @Override
-    public EntitySummaryResult getEntitySummary(String entityType, String cluster, String start, String end,
-                                   String fields, String filterBy, String filterTags,
-                                   String orderBy, String sortOrder, Integer offset, Integer numResults,
-                                   Integer numInstances, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Entities.SUMMARY.path, entityType)
-            .addQueryParam(CLUSTER, cluster).addQueryParam(START, start).addQueryParam(END, end)
-            .addQueryParam(SORT_ORDER, sortOrder).addQueryParam(ORDER_BY, orderBy)
-            .addQueryParam(OFFSET, offset).addQueryParam(NUM_RESULTS, numResults)
-            .addQueryParam(DO_AS_OPT, doAsUser).addQueryParam(FILTER_BY, filterBy)
-            .addQueryParam(NUM_INSTANCES, numInstances).addQueryParam(FIELDS, fields)
-            .addQueryParam(FILTER_TAGS, filterTags).call(Entities.SUMMARY);
-        return getResponse(EntitySummaryResult.class, clientResponse);
-    }
-
-    @Override
-    public APIResult touch(String entityType, String entityName, String colo,
-                           Boolean skipDryRun, String doAsUser) throws FalconCLIException {
-        Entities operation = Entities.TOUCH;
-        ClientResponse clientResponse = new ResourceBuilder().path(operation.path, entityType, entityName)
-            .addQueryParam(COLO, colo).addQueryParam(SKIP_DRYRUN, skipDryRun)
-            .addQueryParam(DO_AS_OPT, doAsUser).call(operation);
-        return getResponse(APIResult.class, clientResponse);
-    }
-
-    public InstancesResult getRunningInstances(String type, String entity, String colo, List<LifeCycle> lifeCycles,
-                                      String filterBy, String orderBy, String sortOrder,
-                                      Integer offset, Integer numResults, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.RUNNING.path, type, entity)
-            .addQueryParam(FILTER_BY, filterBy).addQueryParam(ORDER_BY, orderBy)
-            .addQueryParam(SORT_ORDER, sortOrder).addQueryParam(OFFSET, offset)
-            .addQueryParam(NUM_RESULTS, numResults).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(USER, doAsUser).call(Instances.RUNNING);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    @Override
-    public InstancesResult getStatusOfInstances(String type, String entity, String start, String end, String colo,
-                                                List<LifeCycle> lifeCycles, String filterBy, String orderBy,
-                                                String sortOrder, Integer offset, Integer numResults,
-                                                String doAsUser, Boolean allAttempts) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.STATUS.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(FILTER_BY, filterBy)
-            .addQueryParam(ORDER_BY, orderBy).addQueryParam(SORT_ORDER, sortOrder)
-            .addQueryParam(OFFSET, offset).addQueryParam(NUM_RESULTS, numResults)
-            .addQueryParam(ALL_ATTEMPTS, allAttempts).addQueryParam(USER, doAsUser).call(Instances.STATUS);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public InstancesSummaryResult getSummaryOfInstances(String type, String entity,
-                                        String start, String end,
-                                        String colo, List<LifeCycle> lifeCycles,
-                                        String filterBy, String orderBy, String sortOrder,
-                                        String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.SUMMARY.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(USER, doAsUser).call(Instances.SUMMARY);
-        return getResponse(InstancesSummaryResult.class, clientResponse);
-    }
-
-    public FeedInstanceResult getFeedListing(String type, String entity, String start,
-                                     String end, String colo, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.LISTING.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(USER, doAsUser).call(Instances.LISTING);
-        return getResponse(FeedInstanceResult.class, clientResponse);
-    }
-
-    public InstancesResult killInstances(String type, String entity, String start,
-                                String end, String colo, String clusters,
-                                String sourceClusters, List<LifeCycle> lifeCycles,
-                                String doAsUser) throws FalconCLIException, UnsupportedEncodingException {
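-        // The optional clusters/sourceClusters are packed into a properties stream and sent as the request body.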
-        InputStream props = getServletInputStream(clusters, sourceClusters, null);
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.KILL.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(USER, doAsUser).call(Instances.KILL, props);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public InstancesResult suspendInstances(String type, String entity, String start, String end, String colo,
-                                            String clusters, String sourceClusters, List<LifeCycle> lifeCycles,
-                                            String doAsUser) throws FalconCLIException, UnsupportedEncodingException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.SUSPEND.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(USER, doAsUser).call(Instances.SUSPEND);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public InstancesResult resumeInstances(String type, String entity, String start, String end, String colo,
-                                           String clusters, String sourceClusters, List<LifeCycle> lifeCycles,
-                                           String doAsUser) throws FalconCLIException, UnsupportedEncodingException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.RESUME.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(USER, doAsUser).call(Instances.RESUME);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public InstancesResult rerunInstances(String type, String entity, String start,
-                                 String end, String filePath, String colo,
-                                 String clusters, String sourceClusters, List<LifeCycle> lifeCycles,
-                                 Boolean isForced, String doAsUser)
-        throws FalconCLIException, IOException {
-
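-        // If a properties file is supplied, read its contents so they can be sent along with the rerun request.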
-        StringBuilder buffer = new StringBuilder();
-        if (filePath != null) {
-            BufferedReader in = null;
-            try {
-                in = new BufferedReader(new FileReader(filePath));
-
-                String str;
-                while ((str = in.readLine()) != null) {
-                    buffer.append(str).append("\n");
-                }
-            } finally {
-                IOUtils.closeQuietly(in);
-            }
-        }
-        String temp = (buffer.length() == 0) ? null : buffer.toString();
-        InputStream props = getServletInputStream(clusters, sourceClusters, temp);
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.RERUN.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(LIFECYCLE, lifeCycles, type).addQueryParam(FORCE, isForced)
-            .addQueryParam(USER, doAsUser).call(Instances.RERUN, props);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public InstancesResult getLogsOfInstances(String type, String entity, String start,
-                                              String end, String colo, String runId,
-                                              List<LifeCycle> lifeCycles, String filterBy,
-                                              String orderBy, String sortOrder, Integer offset,
-                                              Integer numResults, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.LOG.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(END, end).addQueryParam(COLO, colo)
-            .addQueryParam(RUN_ID, runId).addQueryParam(LIFECYCLE, lifeCycles, type)
-            .addQueryParam(FILTER_BY, filterBy).addQueryParam(ORDER_BY, orderBy)
-            .addQueryParam(SORT_ORDER, sortOrder).addQueryParam(OFFSET, offset)
-            .addQueryParam(NUM_RESULTS, numResults).addQueryParam(USER, doAsUser).call(Instances.LOG);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public InstancesResult getParamsOfInstance(String type, String entity,
-                                      String start, String colo,
-                                      List<LifeCycle> lifeCycles,
-                                      String doAsUser)
-        throws FalconCLIException, UnsupportedEncodingException {
-        if (!DateValidator.validate(start)) {
-            throw new FalconCLIException("Start date is mandatory and should be"
-                    + " a valid date in  YYYY-MM-DDTHH:MMZ format.");
-        }
-
-        ClientResponse clientResponse = new ResourceBuilder().path(Instances.PARAMS.path, type, entity)
-            .addQueryParam(START, start).addQueryParam(LIFECYCLE, lifeCycles, type)
-            .addQueryParam(USER, doAsUser).call(Instances.PARAMS);
-        return getResponse(InstancesResult.class, clientResponse);
-    }
-
-    public String getThreadDump(String doAsUser) throws FalconCLIException {
-        return sendAdminRequest(AdminOperations.STACK, doAsUser);
-    }
-
-    @Override
-    public String getVersion(String doAsUser) throws FalconCLIException {
-        return sendAdminRequest(AdminOperations.VERSION, doAsUser);
-    }
-
-    public int getStatus(String doAsUser) throws FalconCLIException {
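-        // Pings the admin "version" endpoint and returns the raw HTTP status code.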
-        AdminOperations job = AdminOperations.VERSION;
-        ClientResponse clientResponse = new ResourceBuilder().path(job.path).addQueryParam(DO_AS_OPT, doAsUser)
-            .call(job);
-        printClientResponse(clientResponse);
-        return clientResponse.getStatus();
-    }
-
-    public String getDimensionList(String dimensionType, String cluster, String doAsUser) throws FalconCLIException {
-        return sendMetadataDiscoveryRequest(MetadataOperations.LIST, dimensionType, null, cluster, doAsUser);
-    }
-
-    public String getReplicationMetricsDimensionList(String schedEntityType, String schedEntityName,
-                                                     Integer numResults, String doAsUser) throws FalconCLIException {
-        return sendRequestForReplicationMetrics(MetadataOperations.LIST,
-                schedEntityType, schedEntityName, numResults, doAsUser);
-    }
-
-    public LineageGraphResult getEntityLineageGraph(String pipelineName, String doAsUser) throws FalconCLIException {
-        MetadataOperations operation = MetadataOperations.LINEAGE;
-        ClientResponse clientResponse = new ResourceBuilder().path(operation.path).addQueryParam(DO_AS_OPT, doAsUser)
-            .addQueryParam(FalconMetadataCLI.PIPELINE_OPT, pipelineName).call(operation);
-        printClientResponse(clientResponse);
-        checkIfSuccessful(clientResponse);
-        return clientResponse.getEntity(LineageGraphResult.class);
-    }
-
-    public String getDimensionRelations(String dimensionType, String dimensionName,
-                                        String doAsUser) throws FalconCLIException {
-        return sendMetadataDiscoveryRequest(MetadataOperations.RELATIONS, dimensionType, dimensionName, null, doAsUser);
-    }
-
-    /**
-     * Opens an InputStream over the file at the given path.
-     *
-     * @param filePath path of the file to stream, may be null
-     * @return InputStream for the file, or null if filePath is null
-     * @throws FalconCLIException if the file cannot be found
-     */
-    private InputStream getServletInputStream(String filePath)
-        throws FalconCLIException {
-
-        if (filePath == null) {
-            return null;
-        }
-        InputStream stream;
-        try {
-            stream = new FileInputStream(filePath);
-        } catch (FileNotFoundException e) {
-            throw new FalconCLIException("File not found:", e);
-        }
-        return stream;
-    }
-
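-    // Common response handling: log the response in debug mode, fail on unsuccessful status, then deserialize.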
-    private <T extends APIResult> T getResponse(Class<T> clazz,
-                                                ClientResponse clientResponse) throws FalconCLIException {
-        printClientResponse(clientResponse);
-        checkIfSuccessful(clientResponse);
-        return clientResponse.getEntity(clazz);
-    }
-
-    private String getResponseAsString(ClientResponse clientResponse) throws FalconCLIException {
-        printClientResponse(clientResponse);
-        checkIfSuccessful(clientResponse);
-        return clientResponse.getEntity(String.class);
-    }
-
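-    /**
-     * Fluent helper that assembles a Jersey WebResource path and query parameters,
-     * then issues the HTTP call with the authentication cookie attached.
-     */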
-    private class ResourceBuilder {
-        WebResource resource;
-
-        private ResourceBuilder path(String... paths) {
-            for (String path : paths) {
-                if (resource == null) {
-                    resource = service.path(path);
-                } else {
-                    resource = resource.path(path);
-                }
-            }
-            return this;
-        }
-
-        public ResourceBuilder addQueryParam(String paramName, Integer value) {
-            if (value != null) {
-                resource = resource.queryParam(paramName, value.toString());
-            }
-            return this;
-        }
-
-        public ResourceBuilder addQueryParam(String paramName, Boolean paramValue) {
-            if (paramValue != null) {
-                resource = resource.queryParam(paramName, String.valueOf(paramValue));
-            }
-            return this;
-        }
-
-        public ResourceBuilder addQueryParam(String paramName, String paramValue) {
-            if (StringUtils.isNotBlank(paramValue)) {
-                resource = resource.queryParam(paramName, paramValue);
-            }
-            return this;
-        }
-
-        public ResourceBuilder addQueryParam(String paramName, List<LifeCycle> lifeCycles,
-                                             String type) throws FalconCLIException {
-            if (lifeCycles != null) {
-                checkLifeCycleOption(lifeCycles, type);
-                for (LifeCycle lifeCycle : lifeCycles) {
-                    resource = resource.queryParam(paramName, lifeCycle.toString());
-                }
-            }
-            return this;
-        }
-
-        private ClientResponse call(Entities entities) {
-            return resource.header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(entities.mimeType).type(MediaType.TEXT_XML)
-                .method(entities.method, ClientResponse.class);
-        }
-
-        public ClientResponse call(AdminOperations operation) {
-            return resource.header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(operation.mimeType).type(MediaType.TEXT_XML)
-                .method(operation.method, ClientResponse.class);
-        }
-
-        private ClientResponse call(MetadataOperations metadataOperations) {
-            return resource.header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(metadataOperations.mimeType).type(MediaType.TEXT_XML)
-                .method(metadataOperations.method, ClientResponse.class);
-        }
-
-        public ClientResponse call(Instances operation) {
-            return resource.header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(operation.mimeType).type(MediaType.TEXT_XML)
-                .method(operation.method, ClientResponse.class);
-        }
-
-        public ClientResponse call(Entities operation, InputStream entityStream) {
-            return resource.header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(operation.mimeType).type(MediaType.TEXT_XML)
-                .method(operation.method, ClientResponse.class, entityStream);
-        }
-
-        public ClientResponse call(Instances operation, InputStream entityStream) {
-            return resource.header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(operation.mimeType).type(MediaType.TEXT_XML)
-                .method(operation.method, ClientResponse.class, entityStream);
-        }
-    }
-
-    public FeedLookupResult reverseLookUp(String type, String path, String doAsUser) throws FalconCLIException {
-        Entities api = Entities.LOOKUP;
-        ClientResponse response = new ResourceBuilder().path(api.path, type).addQueryParam(DO_AS_OPT, doAsUser)
-            .addQueryParam(PATH, path).call(api);
-        return getResponse(FeedLookupResult.class, response);
-    }
-
-    public FeedInstanceResult getFeedInstanceListing(String type, String entity, String start, String end,
-            String colo, String doAsUser) throws FalconCLIException {
-
-        checkType(type);
-        Instances api = Instances.LISTING;
-        ClientResponse clientResponse = new ResourceBuilder().path(api.path, type, entity)
-            .addQueryParam(COLO, colo).addQueryParam(DO_AS_OPT, doAsUser).addQueryParam(START, start)
-            .addQueryParam(END, end).call(api);
-        return getResponse(FeedInstanceResult.class, clientResponse);
-    }
-
-
-    public InstanceDependencyResult getInstanceDependencies(String entityType, String entityName, String instanceTime,
-                                                            String colo) throws FalconCLIException {
-        checkType(entityType);
-        Instances api = Instances.DEPENDENCY;
-        ClientResponse clientResponse = new ResourceBuilder().path(api.path, entityType, entityName)
-            .addQueryParam(COLO, colo).addQueryParam(INSTANCE_TIME, instanceTime).call(api);
-        return getResponse(InstanceDependencyResult.class, clientResponse);
-    }
-
-    //RESUME CHECKSTYLE CHECK ParameterNumberCheck
-
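-    // Ensures every requested lifecycle value belongs to the given entity type.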
-    private void checkLifeCycleOption(List<LifeCycle> lifeCycles, String type) throws FalconCLIException {
-        if (lifeCycles != null && !lifeCycles.isEmpty()) {
-            EntityType entityType = EntityType.getEnum(type);
-            for (LifeCycle lifeCycle : lifeCycles) {
-                if (entityType != lifeCycle.getTag().getType()) {
-                    throw new FalconCLIException("Incorrect lifecycle: " + lifeCycle + "for given type: " + type);
-                }
-            }
-        }
-    }
-
-    protected void checkType(String type) throws FalconCLIException {
-        if (type == null || type.isEmpty()) {
-            throw new FalconCLIException("entity type is empty");
-        } else {
-            EntityType entityType = EntityType.getEnum(type);
-            if (entityType == EntityType.CLUSTER) {
-                throw new FalconCLIException(
-                        "Instance management functions don't apply to Cluster entities");
-            }
-        }
-    }
-
-    private String sendAdminRequest(AdminOperations job, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(job.path).addQueryParam(DO_AS_OPT, doAsUser)
-            .call(job);
-        return getResponseAsString(clientResponse);
-    }
-
-    private String sendRequestForReplicationMetrics(final MetadataOperations operation, final String schedEntityType,
-                                                    final String schedEntityName, Integer numResults,
-                                                    final String doAsUser) throws FalconCLIException {
-        WebResource resource = service.path(operation.path)
-                .path(schedEntityName)
-                .path(RelationshipType.REPLICATION_METRICS.getName())
-                .path(FalconMetadataCLI.LIST_OPT);
-
-        if (StringUtils.isNotEmpty(schedEntityName)) {
-            resource = resource.queryParam(FalconCLI.TYPE_OPT, schedEntityType);
-        }
-
-        if (numResults != null) {
-            resource = resource.queryParam(FalconCLI.NUM_RESULTS_OPT, numResults.toString());
-        }
-
-        if (StringUtils.isNotEmpty(doAsUser)) {
-            resource = resource.queryParam(FalconCLI.DO_AS_OPT, doAsUser);
-        }
-
-        ClientResponse clientResponse = resource
-                .header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(operation.mimeType).type(operation.mimeType)
-                .method(operation.method, ClientResponse.class);
-
-        printClientResponse(clientResponse);
-
-        checkIfSuccessful(clientResponse);
-        return clientResponse.getEntity(String.class);
-
-    }
-
-    private String sendMetadataDiscoveryRequest(final MetadataOperations operation,
-                                                final String dimensionType,
-                                                final String dimensionName,
-                                                final String cluster,
-                                                final String doAsUser) throws FalconCLIException {
-        WebResource resource;
-        switch (operation) {
-        case LIST:
-            resource = service.path(operation.path)
-                    .path(dimensionType)
-                    .path(FalconMetadataCLI.LIST_OPT);
-            break;
-
-        case RELATIONS:
-            resource = service.path(operation.path)
-                    .path(dimensionType)
-                    .path(dimensionName)
-                    .path(FalconMetadataCLI.RELATIONS_OPT);
-            break;
-
-        default:
-            throw new FalconCLIException("Invalid Metadata client Operation " + operation.toString());
-        }
-
-        if (!StringUtils.isEmpty(cluster)) {
-            resource = resource.queryParam(FalconMetadataCLI.CLUSTER_OPT, cluster);
-        }
-
-        if (StringUtils.isNotEmpty(doAsUser)) {
-            resource = resource.queryParam(FalconCLI.DO_AS_OPT, doAsUser);
-        }
-
-        ClientResponse clientResponse = resource
-                .header("Cookie", AUTH_COOKIE_EQ + authenticationToken)
-                .accept(operation.mimeType).type(operation.mimeType)
-                .method(operation.method, ClientResponse.class);
-
-        printClientResponse(clientResponse);
-
-        checkIfSuccessful(clientResponse);
-        return clientResponse.getEntity(String.class);
-    }
-
-
-    public String getVertex(String id, String doAsUser) throws FalconCLIException {
-        return sendMetadataLineageRequest(MetadataOperations.VERTICES, id, doAsUser);
-    }
-
-    public String getVertices(String key, String value, String doAsUser) throws FalconCLIException {
-        return sendMetadataLineageRequest(MetadataOperations.VERTICES, key, value, doAsUser);
-    }
-
-    public String getVertexEdges(String id, String direction, String doAsUser) throws FalconCLIException {
-        return sendMetadataLineageRequestForEdges(MetadataOperations.VERTICES, id, direction, doAsUser);
-    }
-
-    public String getEdge(String id, String doAsUser) throws FalconCLIException {
-        return sendMetadataLineageRequest(MetadataOperations.EDGES, id, doAsUser);
-    }
-
-    private String getRecipePath(String recipePropertiesFile) throws FalconCLIException {
-        String recipePath = null;
-        if (StringUtils.isNotBlank(recipePropertiesFile)) {
-            File file = new File(recipePropertiesFile);
-            if (file.exists()) {
-                recipePath = file.getAbsoluteFile().getParentFile().getAbsolutePath();
-            }
-        } else {
-            recipePath = clientProperties.getProperty("falcon.recipe.path");
-        }
-
-        return recipePath;
-    }
-
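-    /**
-     * Expands the named recipe template and properties into a process XML via the recipe tool,
-     * then validates and submits/schedules the generated process.
-     */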
-    public APIResult submitRecipe(String recipeName, String recipeToolClassName,
-                                  final String recipeOperation, String recipePropertiesFile, Boolean skipDryRun,
-                                  final String doAsUser) throws FalconCLIException {
-        String recipePath = getRecipePath(recipePropertiesFile);
-
-        if (StringUtils.isEmpty(recipePath)) {
-            throw new FalconCLIException("falcon.recipe.path is not set in client.properties or properties "
-                    + " file is not provided");
-        }
-
-        String templateFilePath = recipePath + File.separator + recipeName + TEMPLATE_SUFFIX;
-        File file = new File(templateFilePath);
-        if (!file.exists()) {
-            throw new FalconCLIException("Recipe template file does not exist : " + templateFilePath);
-        }
-
-        String propertiesFilePath = recipePath + File.separator + recipeName + PROPERTIES_SUFFIX;
-        file = new File(propertiesFilePath);
-        if (!file.exists()) {
-            throw new FalconCLIException("Recipe properties file does not exist : " + propertiesFilePath);
-        }
-
-        String processFile;
-        try {
-            String prefix =  "falcon-recipe" + "-" + System.currentTimeMillis();
-            File tmpPath = new File("/tmp");
-            if (!tmpPath.exists()) {
-                if (!tmpPath.mkdir()) {
-                    throw new FalconCLIException("Creating directory failed: " + tmpPath.getAbsolutePath());
-                }
-            }
-            File f = File.createTempFile(prefix, ".xml", tmpPath);
-            f.deleteOnExit();
-
-            processFile = f.getAbsolutePath();
-            String[] args = {
-                "-" + RecipeToolArgs.RECIPE_FILE_ARG.getName(), templateFilePath,
-                "-" + RecipeToolArgs.RECIPE_PROPERTIES_FILE_ARG.getName(), propertiesFilePath,
-                "-" + RecipeToolArgs.RECIPE_PROCESS_XML_FILE_PATH_ARG.getName(), processFile,
-                "-" + RecipeToolArgs.RECIPE_OPERATION_ARG.getName(), recipeOperation,
-            };
-
-            if (recipeToolClassName != null) {
-                Class<?> clz = Class.forName(recipeToolClassName);
-                Method method = clz.getMethod("main", String[].class);
-                method.invoke(null, args);
-            } else {
-                RecipeTool.main(args);
-            }
-            validate(EntityType.PROCESS.toString(), processFile, skipDryRun, doAsUser);
-            return submitAndSchedule(EntityType.PROCESS.toString(), processFile, skipDryRun, doAsUser, null);
-        } catch (Exception e) {
-            throw new FalconCLIException(e.getMessage(), e);
-        }
-    }
-
-    private String sendMetadataLineageRequest(MetadataOperations job, String id,
-                                              String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(job.path, id).addQueryParam(DO_AS_OPT, doAsUser)
-            .call(job);
-        return getResponseAsString(clientResponse);
-    }
-
-    private String sendMetadataLineageRequest(MetadataOperations job, String key,
-                                              String value, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(job.path).addQueryParam(DO_AS_OPT, doAsUser)
-            .addQueryParam(KEY, key).addQueryParam(VALUE, value).call(job);
-        return getResponseAsString(clientResponse);
-    }
-
-    private String sendMetadataLineageRequestForEdges(MetadataOperations job, String id,
-                                                      String direction, String doAsUser) throws FalconCLIException {
-        ClientResponse clientResponse = new ResourceBuilder().path(job.path, id, direction)
-            .addQueryParam(DO_AS_OPT, doAsUser).call(job);
-        return getResponseAsString(clientResponse);
-    }
-
-    private void checkIfSuccessful(ClientResponse clientResponse) throws FalconCLIException {
-        Response.Status.Family statusFamily = clientResponse.getClientResponseStatus().getFamily();
-        if (statusFamily != Response.Status.Family.SUCCESSFUL
-                && statusFamily != Response.Status.Family.INFORMATIONAL) {
-            throw FalconCLIException.fromReponse(clientResponse);
-        }
-    }
-
-    private void printClientResponse(ClientResponse clientResponse) {
-        if (getDebugMode()) {
-            OUT.get().println(clientResponse.toString());
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/AccessControlList.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/AccessControlList.java b/client/src/main/java/org/apache/falcon/entity/v0/AccessControlList.java
deleted file mode 100644
index 89ce6f9..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/AccessControlList.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-/**
- * Access control list for an Entity.
- */
-public abstract class AccessControlList {
-
-    public abstract String getOwner();
-
-    public abstract String getGroup();
-
-    public abstract String getPermission();
-
-    @Override
-    public String toString() {
-        return "AccessControlList{"
-                + "owner='" + getOwner() + '\''
-                + ", group='" + getGroup() + '\''
-                + ", permission='" + getPermission() + '\''
-                + '}';
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/DateValidator.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/DateValidator.java b/client/src/main/java/org/apache/falcon/entity/v0/DateValidator.java
deleted file mode 100644
index e211f57..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/DateValidator.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.entity.v0;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Date utility class.
- */
-public final class DateValidator {
-
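-    // Matches UTC timestamps of the form yyyy-MM-ddTHH:mmZ, e.g. 2016-03-01T09:25Z.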
-    private static final String DATE_PATTERN =
-            "(2\\d\\d\\d|19\\d\\d)-(0[1-9]|1[012])-(0[1-9]|1[0-9]|2[0-9]|3[01])T([0-1][0-9]|2[0-3]):([0-5][0-9])Z";
-    private static final Pattern PATTERN = Pattern.compile(DATE_PATTERN);
-
-    private DateValidator() {
-    }
-
-    /**
-     * Validate date format with regular expression.
-     *
-     * @param date date string to validate
-     * @return true if the date format is valid, false otherwise
-     */
-    public static boolean validate(final String date) {
-        if (StringUtils.isBlank(date)) {
-            return false;
-        }
-        Matcher matcher = PATTERN.matcher(date);
-
-        if (matcher.matches()) {
-
-            matcher.reset();
-
-            if (matcher.find()) {
-
-                int year = Integer.parseInt(matcher.group(1));
-                String month = matcher.group(2);
-                String day = matcher.group(3);
-
-                if (day.equals("31")
-                        && (month.equals("4") || month.equals("6")
-                        || month.equals("9") || month.equals("11")
-                        || month.equals("04") || month.equals("06") || month.equals("09"))) {
-                    return false; // only 1,3,5,7,8,10,12 has 31 days
-                } else if (month.equals("2") || month.equals("02")) {
-                    // leap year
-                    if (year % 4 == 0) {
-                        return !(day.equals("30") || day.equals("31"));
-                    } else {
-                        return !(day.equals("29") || day.equals("30") || day.equals("31"));
-                    }
-                } else {
-                    return true;
-                }
-            } else {
-                return false;
-            }
-        } else {
-            return false;
-        }
-    }
-}


[29/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java b/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java
deleted file mode 100644
index 7159966..0000000
--- a/common/src/test/java/org/apache/falcon/entity/parser/ProcessEntityParserTest.java
+++ /dev/null
@@ -1,632 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Property;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.fs.Path;
-import org.mockito.Mockito;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests for validating process entity parser.
- */
-public class ProcessEntityParserTest extends AbstractTestBase {
-
-    private final ProcessEntityParser parser = (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-
-    @Test
-    public void testNotNullgetUnmarshaller() throws Exception {
-        Unmarshaller unmarshaller = EntityType.PROCESS.getUnmarshaller();
-        Assert.assertNotNull(unmarshaller);
-    }
-
-    @BeforeClass
-    public void init() throws Exception {
-        this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
-        this.conf = dfsCluster.getConf();
-    }
-
-    @AfterClass
-    public void tearDown() {
-        this.dfsCluster.shutdown();
-    }
-
-    @Override
-    @BeforeMethod
-    public void setup() throws Exception {
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        storeEntity(EntityType.FEED, "impressionFeed");
-        storeEntity(EntityType.FEED, "clicksFeed");
-        storeEntity(EntityType.FEED, "imp-click-join1");
-        storeEntity(EntityType.FEED, "imp-click-join2");
-        storeEntity(EntityType.PROCESS, "sample");
-        dfsCluster.getFileSystem().mkdirs(new Path("/falcon/test/workflow"));
-    }
-
-    @Test
-    public void testParse() throws FalconException, JAXBException {
-
-        Process process = parser.parseAndValidate(getClass().getResourceAsStream(PROCESS_XML));
-
-        Assert.assertNotNull(process);
-        Assert.assertEquals(process.getName(), "sample");
-
-        Assert.assertEquals(process.getParallel(), 1);
-        Assert.assertEquals(process.getOrder().name(), "LIFO");
-        Assert.assertEquals(process.getFrequency().toString(), "hours(1)");
-        Assert.assertEquals(process.getEntityType(), EntityType.PROCESS);
-
-        Assert.assertEquals(process.getTags(),
-                "consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting");
-        Assert.assertEquals(process.getPipelines(), "testPipeline,dataReplication_Pipeline");
-
-        Assert.assertEquals(process.getInputs().getInputs().get(0).getName(), "impression");
-        Assert.assertEquals(process.getInputs().getInputs().get(0).getFeed(), "impressionFeed");
-        Assert.assertEquals(process.getInputs().getInputs().get(0).getStart(), "today(0,0)");
-        Assert.assertEquals(process.getInputs().getInputs().get(0).getEnd(), "today(2,0)");
-        Assert.assertEquals(process.getInputs().getInputs().get(0).getPartition(), "*/US");
-        Assert.assertEquals(process.getInputs().getInputs().get(0).isOptional(), false);
-
-        Assert.assertEquals(process.getOutputs().getOutputs().get(0).getName(), "impOutput");
-        Assert.assertEquals(process.getOutputs().getOutputs().get(0).getFeed(), "imp-click-join1");
-        Assert.assertEquals(process.getOutputs().getOutputs().get(0).getInstance(), "today(0,0)");
-
-        Assert.assertEquals(process.getProperties().getProperties().get(0).getName(), "name1");
-        Assert.assertEquals(process.getProperties().getProperties().get(0).getValue(), "value1");
-
-        Cluster processCluster = process.getClusters().getClusters().get(0);
-        Assert.assertEquals(SchemaHelper.formatDateUTC(processCluster.getValidity().getStart()), "2011-11-02T00:00Z");
-        Assert.assertEquals(SchemaHelper.formatDateUTC(processCluster.getValidity().getEnd()), "2091-12-30T00:00Z");
-        Assert.assertEquals(process.getTimezone().getID(), "UTC");
-
-        Assert.assertEquals(process.getSla().getShouldStartIn().toString(), "hours(2)");
-        Assert.assertEquals(process.getSla().getShouldEndIn().toString(), "hours(4)");
-
-        Assert.assertEquals(process.getWorkflow().getEngine().name().toLowerCase(), "oozie");
-        Assert.assertEquals(process.getWorkflow().getPath(), "/falcon/test/workflow");
-
-        StringWriter stringWriter = new StringWriter();
-        Marshaller marshaller = EntityType.PROCESS.getMarshaller();
-        marshaller.marshal(process, stringWriter);
-        System.out.println(stringWriter.toString());
-
-        // TODO for retry and late policy
-    }
-
-    @Test
-    public void testELExpressions() throws Exception {
-        Process process = parser.parseAndValidate(getClass().getResourceAsStream(PROCESS_XML));
-        process.getInputs().getInputs().get(0).setStart("lastMonth(0,0,0)");
-        try {
-            parser.validate(process);
-            throw new AssertionError("Expected ValidationException!");
-        } catch (ValidationException e) {
-            //ignore
-        }
-
-        process.getInputs().getInputs().get(0).setStart("today(0,0)");
-        process.getInputs().getInputs().get(0).setEnd("lastMonth(0,0,0)");
-        try {
-            parser.validate(process);
-            throw new AssertionError("Expected ValidationException!");
-        } catch (ValidationException e) {
-            //ignore
-        }
-
-        process.getInputs().getInputs().get(0).setStart("today(2,0)");
-        process.getInputs().getInputs().get(0).setEnd("today(0,0)");
-        try {
-            parser.validate(process);
-            throw new AssertionError("Expected ValidationException!");
-        } catch (ValidationException e) {
-            //ignore
-        }
-    }
-
-    @Test(expectedExceptions = FalconException.class, expectedExceptionsMessageRegExp = "shouldStartIn of Process:.*")
-    public void testInvalidShouldStart() throws FalconException {
-        Process process = parser.parseAndValidate((ProcessEntityParserTest.class
-                .getResourceAsStream(PROCESS_XML)));
-        process.getSla().setShouldStartIn(new Frequency("hours(4)"));
-        process.getSla().setShouldEndIn(new Frequency("hours(2)"));
-        parser.validate(process);
-    }
-
-
-    @Test(expectedExceptions = FalconException.class,
-            expectedExceptionsMessageRegExp = ".* greater than timeout.*")
-    public void testShouldStartGreaterThanTimeout() throws FalconException {
-        Process process = parser.parseAndValidate((ProcessEntityParserTest.class
-                .getResourceAsStream(PROCESS_XML)));
-        process.getSla().setShouldStartIn(new Frequency("hours(2)"));
-        process.setTimeout(new Frequency("hours(1)"));
-        parser.validate(process);
-    }
-
-
-
-    @Test(expectedExceptions = FalconException.class)
-    public void doParseInvalidXML() throws IOException, FalconException {
-
-        String invalidProcessXml = "/config/process/process-invalid.xml";
-        parser.parseAndValidate(this.getClass().getResourceAsStream(invalidProcessXml));
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void applyValidationInvalidProcess() throws Exception {
-        Process process = parser.parseAndValidate(getClass().getResourceAsStream(PROCESS_XML));
-        process.getClusters().getClusters().get(0).setName("invalid cluster");
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testValidate() throws FalconException {
-        parser.parseAndValidate("<process></process>");
-    }
-
-    //SUSPEND CHECKSTYLE CHECK HiddenFieldCheck
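-    // Verifies that parsers obtained from EntityParserFactory can parse the same XML concurrently.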
-    @Test
-    public void testConcurrentParsing() throws Exception {
-        List<Thread> threadList = new ArrayList<Thread>();
-
-        for (int i = 0; i < 3; i++) {
-            threadList.add(new Thread() {
-                @Override
-                public void run() {
-                    try {
-                        EntityParser parser = EntityParserFactory.getParser(EntityType.PROCESS);
-                        parser.parseAndValidate(this.getClass().getResourceAsStream(PROCESS_XML));
-                    } catch (Exception e) {
-                        throw new RuntimeException(e);
-                    }
-                }
-            });
-        }
-        for (Thread thread : threadList) {
-            thread.start();
-        }
-        for (Thread thread : threadList) {
-            thread.join();
-        }
-    }
-    //RESUME CHECKSTYLE CHECK HiddenFieldCheck
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInvalidDependentFeedsRetentionLimit() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getInputs().getInputs().get(0).setStart("today(-48,0)");
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testDuplicateInputOutputNames() throws FalconException {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getInputs().getInputs().get(0).setName("duplicateName");
-        process.getOutputs().getOutputs().get(0).setName("duplicateName");
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testInvalidRetryAttempt() throws FalconException {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getRetry().setAttempts(-1);
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testInvalidRetryDelay() throws FalconException {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getRetry().setDelay(Frequency.fromString("hours(0)"));
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test()
-    public void testRetryTimeout() throws FalconException {
-        Process process = parser
-                .parseAndValidate(ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML));
-        process.getRetry().setOnTimeout(Boolean.TRUE);
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInvalidLateInputs() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getLateProcess().getLateInputs().get(0).setInput("invalidInput");
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testInvalidProcessName() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.setName("name_with_underscore");
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test
-    public void testOozieFutureExpression() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getInputs().getInputs().get(0).setStart("future(1,2)");
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test
-    public void testOozieLatestExpression() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getInputs().getInputs().get(0).setStart("latest(-1)");
-        parser.parseAndValidate(process.toString());
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testDuplicateClusterName() throws Exception {
-        Process process = parser
-                .parse((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().add(1, process.getClusters().getClusters().get(0));
-        parser.validate(process);
-    }
-
-    @Test
-    public void testProcessForTableStorage() throws Exception {
-        Feed inFeed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                this.getClass().getResource("/config/feed/hive-table-feed.xml"));
-        getStore().publish(EntityType.FEED, inFeed);
-
-        Feed outFeed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                this.getClass().getResource("/config/feed/hive-table-feed-out.xml"));
-        getStore().publish(EntityType.FEED, outFeed);
-
-        Process process = parser.parse(
-                ProcessEntityParserTest.class.getResourceAsStream("/config/process/process-table.xml"));
-        Input input = process.getInputs().getInputs().get(0);
-        Assert.assertFalse(input.isOptional());
-        parser.validate(process);
-
-        // Test Optional Inputs For Table Storage
-        try {
-            input.setOptional(Boolean.TRUE);
-            Assert.assertTrue(input.isOptional());
-            parser.validate(process);
-            Assert.fail("Validation exception must have been thrown.");
-        } catch (FalconException e) {
-            Assert.assertTrue(e instanceof ValidationException);
-        }
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testValidateInputPartitionForTable() throws Exception {
-        Process process = parser.parse(
-                ProcessEntityParserTest.class.getResourceAsStream("/config/process/process-table.xml"));
-        if (process.getInputs() != null) {
-            for (Input input : process.getInputs().getInputs()) {
-                input.setPartition("region=usa");
-            }
-        }
-
-        parser.validate(process);
-        Assert.fail("An exception should have been thrown since Input partitions are not supported for table storage");
-    }
-
-    @Test
-    public void testValidateEmailNotification() throws Exception {
-        Process process = parser.parseAndValidate(getClass().getResourceAsStream(PROCESS_XML));
-        Assert.assertNotNull(process.getNotification());
-        Assert.assertEquals(process.getNotification().getTo(), "falcon@localhost");
-        Assert.assertEquals(process.getNotification().getType(), "email");
-    }
-
-    @Test
-    public void testValidateACLWithNoACLAndAuthorizationDisabled() throws Exception {
-        InputStream stream = this.getClass().getResourceAsStream(PROCESS_XML);
-
-        Process process = parser.parse(stream);
-        Assert.assertNotNull(process);
-        Assert.assertNull(process.getACL());
-
-        parser.validate(process);
-    }
-
-    @Test
-    public void testValidateACLWithACLAndAuthorizationDisabled() throws Exception {
-        InputStream stream = this.getClass().getResourceAsStream("/config/process/process-table.xml");
-
-        Process process = parser.parse(stream);
-        Assert.assertNotNull(process);
-        Assert.assertNotNull(process.getACL());
-        Assert.assertNotNull(process.getACL().getOwner());
-        Assert.assertNotNull(process.getACL().getGroup());
-        Assert.assertNotNull(process.getACL().getPermission());
-
-        parser.validate(process);
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLWithNoACLAndAuthorizationEnabled() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_1);
-
-        try {
-            // need a new parser since it caches authorization enabled flag
-            ProcessEntityParser processEntityParser =
-                    (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-            InputStream stream = this.getClass().getResourceAsStream(PROCESS_XML);
-
-            Process process = processEntityParser.parse(stream);
-            Assert.assertNotNull(process);
-            Assert.assertNull(process.getACL());
-
-            processEntityParser.validate(process);
-            Assert.fail("Validation exception should have been thrown for empty ACL");
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAuthorizationEnabledValidOwnerBadGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_1);
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream("/config/process/process-table.xml");
-
-            // need a new parser since it caches authorization enabled flag
-            ProcessEntityParser processEntityParser =
-                    (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-            Process process = processEntityParser.parseAndValidate(stream);
-            Assert.assertNotNull(process);
-            Assert.assertNotNull(process.getACL());
-            Assert.assertNotNull(process.getACL().getOwner());
-            Assert.assertNotNull(process.getACL().getGroup());
-            Assert.assertNotNull(process.getACL().getPermission());
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test
-    public void testValidateACLAuthorizationEnabledValidGroupBadOwner() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        CurrentUser.authenticate(USER); // valid user but acl owner is falcon
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream("/config/process/process-table.xml");
-
-            // need a new parser since it caches authorization enabled flag
-            ProcessEntityParser processEntityParser =
-                    (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-            Process process = processEntityParser.parse(stream);
-            Assert.assertNotNull(process);
-            Assert.assertNotNull(process.getACL());
-            Assert.assertNotNull(process.getACL().getOwner());
-            Assert.assertNotNull(process.getACL().getGroup());
-            Assert.assertNotNull(process.getACL().getPermission());
-
-            process.getACL().setOwner(USER);
-            process.getACL().setGroup(getPrimaryGroupName());
-
-            processEntityParser.validate(process);
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAuthorizationEnabledBadOwnerAndGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        CurrentUser.authenticate("blah");
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream("/config/process/process-table.xml");
-
-            // need a new parser since it caches authorization enabled flag
-            ProcessEntityParser processEntityParser =
-                    (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-            Process process = processEntityParser.parse(stream);
-
-            Assert.assertNotNull(process);
-            Assert.assertNotNull(process.getACL());
-            Assert.assertNotNull(process.getACL().getOwner());
-            Assert.assertNotNull(process.getACL().getGroup());
-            Assert.assertNotNull(process.getACL().getPermission());
-
-            processEntityParser.validate(process);
-            Assert.fail("Validation exception should have been thrown for invalid owner");
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    /**
-     * A negative test for the pipelines tag, which must be a comma-separated list of values.
-     * @throws FalconException
-     */
-    @Test
-    public void testPipelineTags() throws FalconException {
-        try {
-            InputStream stream = this.getClass().getResourceAsStream("/config/process/process-bad-pipeline.xml");
-
-            parser.parse(stream);
-            Assert.fail("org.xml.sax.SAXParseException should have been thrown.");
-        } catch (FalconException e) {
-            Assert.assertEquals(javax.xml.bind.UnmarshalException.class, e.getCause().getClass());
-        }
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testEndTimeProcessBeforeStartTime() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().get(0).getValidity().setEnd(
-                SchemaHelper.parseDateUTC("2010-12-31T00:00Z"));
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInstanceStartTimeBeforeFeedStartTimeForInput() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().get(0).getValidity().setStart(
-                SchemaHelper.parseDateUTC("2011-10-31T00:00Z"));
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInstanceEndTimeAfterFeedEndTimeForInput() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().get(0).getValidity().setStart(
-                SchemaHelper.parseDateUTC("2011-12-31T00:00Z"));
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInstanceTimeBeforeFeedStartTimeForOutput() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().get(0).getValidity().setStart(
-                SchemaHelper.parseDateUTC("2011-11-02T00:00Z"));
-        process.getOutputs().getOutputs().get(0).setInstance("yesterday(-60,0)");
-        parser.validate(process);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInstanceTimeAfterFeedEndTimeForOutput() throws Exception {
-        Process process = parser
-                .parseAndValidate((ProcessEntityParserTest.class
-                        .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().get(0).getValidity().setStart(
-                SchemaHelper.parseDateUTC("2011-12-30T00:00Z"));
-        process.getOutputs().getOutputs().get(0).setInstance("today(120,0)");
-        parser.validate(process);
-    }
-
-    @Test
-    public void testValidateProcessProperties() throws Exception {
-        ProcessEntityParser processEntityParser = Mockito
-                .spy((ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS));
-        InputStream stream = this.getClass().getResourceAsStream("/config/process/process-0.1.xml");
-        Process process = parser.parse(stream);
-
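-        // Stub out ACL validation so this test exercises only the property validations.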
-        Mockito.doNothing().when(processEntityParser).validateACL(process);
-
-        // Good set of properties, should work
-        processEntityParser.validate(process);
-
-        // add duplicate property, should throw validation exception.
-        Property property1 = new Property();
-        property1.setName("name1");
-        property1.setValue("any value");
-        process.getProperties().getProperties().add(property1);
-        try {
-            processEntityParser.validate(process);
-            Assert.fail(); // should not reach here
-        } catch (ValidationException e) {
-            // Do nothing
-        }
-
-        // Remove duplicate property. It should not throw exception anymore
-        process.getProperties().getProperties().remove(property1);
-        processEntityParser.validate(process);
-
-        // add empty property name, should throw validation exception.
-        property1.setName("");
-        process.getProperties().getProperties().add(property1);
-        try {
-            processEntityParser.validate(process);
-            Assert.fail(); // should not reach here
-        } catch (ValidationException e) {
-            // Do nothing
-        }
-    }
-
-    @Test
-    public void testProcessEndTimeOptional() throws FalconException {
-        Process process = parser.parseAndValidate((ProcessEntityParserTest.class
-                .getResourceAsStream(PROCESS_XML)));
-        process.getClusters().getClusters().get(0).getValidity().setEnd(null);
-        parser.validate(process);
-    }
-
-    @Test
-    public void testProcessEndTime() throws FalconException {
-        Process process = parser.parseAndValidate((ProcessEntityParserTest.class
-                .getResourceAsStream(PROCESS_XML)));
-        String feedName = process.getInputs().getInputs().get(0).getFeed();
-        Feed feedEntity = EntityUtil.getEntity(EntityType.FEED, feedName);
-        feedEntity.getClusters().getClusters().get(0).getValidity().setEnd(null);
-        process.getClusters().getClusters().get(0).getValidity().setEnd(null);
-        parser.validate(process);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/store/ConfigurationStoreTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/store/ConfigurationStoreTest.java b/common/src/test/java/org/apache/falcon/entity/store/ConfigurationStoreTest.java
deleted file mode 100644
index fa3d3f4..0000000
--- a/common/src/test/java/org/apache/falcon/entity/store/ConfigurationStoreTest.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.service.ConfigurationChangeListener;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.testng.Assert;
-import org.testng.annotations.AfterSuite;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeSuite;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-
-/**
- * Tests for validating configuration store.
- */
-public class ConfigurationStoreTest {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ConfigurationStoreTest.class);
-    private static final String PROCESS1NAME = "process1";
-    private static final String PROCESS2NAME = "process2";
-    private static final String PROCESS3NAME = "process3";
-
-    private ConfigurationStore store = ConfigurationStore.get();
-    private TestListener listener = new TestListener();
-
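-    /** A listener that throws from every callback, used to verify the store surfaces listener failures. */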
-    private class TestListener implements ConfigurationChangeListener {
-        @Override
-        public void onAdd(Entity entity) throws FalconException {
-            throw new FalconException("For test");
-        }
-
-        @Override
-        public void onRemove(Entity entity) throws FalconException {
-            throw new FalconException("For test");
-        }
-
-        @Override
-        public void onChange(Entity oldEntity, Entity newEntity) throws FalconException {
-            throw new FalconException("For test");
-        }
-
-        @Override
-        public void onReload(Entity entity) throws FalconException {
-            throw new FalconException("For test");
-        }
-    }
-
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        System.out.println("in setUp (@BeforeClass)");
-        Process process1 = new Process();
-        process1.setName(PROCESS1NAME);
-        store.publish(EntityType.PROCESS, process1);
-
-        Process process2 = new Process();
-        process2.setName(PROCESS2NAME);
-        store.publish(EntityType.PROCESS, process2);
-
-        Process process3 = new Process();
-        process3.setName(PROCESS3NAME);
-        store.publish(EntityType.PROCESS, process3);
-    }
-
-    @Test
-    public void testPublish() throws Exception {
-        Process process = new Process();
-        process.setName("hello");
-        store.publish(EntityType.PROCESS, process);
-        Process p = store.get(EntityType.PROCESS, "hello");
-        Assert.assertEquals(p, process);
-
-        store.registerListener(listener);
-        process.setName("world");
-        try {
-            store.publish(EntityType.PROCESS, process);
-            Assert.fail("Expected exception");
-        } catch(FalconException expected) {
-            //expected
-        }
-        store.unregisterListener(listener);
-    }
-
-    @Test
-    public void testGet() throws Exception {
-        Process p = store.get(EntityType.PROCESS, "notfound");
-        Assert.assertNull(p);
-    }
-
-    @Test
-    public void testRemove() throws Exception {
-        Process process = new Process();
-        process.setName("remove");
-        store.publish(EntityType.PROCESS, process);
-
-        Process p = store.get(EntityType.PROCESS, "remove");
-        Assert.assertEquals(p, process);
-        store.remove(EntityType.PROCESS, "remove");
-        p = store.get(EntityType.PROCESS, "remove");
-        Assert.assertNull(p);
-
-        store.publish(EntityType.PROCESS, process);
-        store.registerListener(listener);
-        try {
-            store.remove(EntityType.PROCESS, "remove");
-            Assert.fail("Expected exception");
-        } catch(FalconException expected) {
-            //expected
-        }
-        store.unregisterListener(listener);
-    }
-
-
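-    // Invoked 6 times across a 3-thread pool; removing an entity that may already be gone should not fail.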
-    @Test(threadPoolSize = 3, invocationCount = 6)
-    public void testConcurrentRemoveOfSameProcess() throws Exception {
-        store.remove(EntityType.PROCESS, PROCESS1NAME);
-        Process p = store.get(EntityType.PROCESS, PROCESS1NAME);
-        Assert.assertNull(p);
-    }
-
-    @Test(threadPoolSize = 3, invocationCount = 6)
-    public void testConcurrentRemove() throws Exception {
-        store.remove(EntityType.PROCESS, PROCESS2NAME);
-        Process p1 = store.get(EntityType.PROCESS, PROCESS2NAME);
-        Assert.assertNull(p1);
-
-        store.remove(EntityType.PROCESS, PROCESS3NAME);
-        Process p2 = store.get(EntityType.PROCESS, PROCESS3NAME);
-        Assert.assertNull(p2);
-    }
-
-    @BeforeSuite
-    @AfterSuite
-    public void cleanup() throws IOException {
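-        // Recursively delete the on-disk config store before and after the suite so state does not leak between runs.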
-        Path path = new Path(StartupProperties.get().
-                getProperty("config.store.uri"));
-        FileSystem fs = FileSystem.get(path.toUri(), new Configuration());
-        fs.delete(path, true);
-        LOG.info("Cleaned up {}", path);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/store/FeedLocationStoreTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/store/FeedLocationStoreTest.java b/common/src/test/java/org/apache/falcon/entity/store/FeedLocationStoreTest.java
deleted file mode 100644
index 033a55b..0000000
--- a/common/src/test/java/org/apache/falcon/entity/store/FeedLocationStoreTest.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.Clusters;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.util.FalconRadixUtils;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.File;
-import java.net.URI;
-import java.util.Collection;
-
-
-/**
- * Tests for FeedLocationStore.
- */
-public class FeedLocationStoreTest extends AbstractTestBase {
-    private ConfigurationStore store;
-
-
-    @BeforeClass
-    public void initConfigStore() throws Exception {
-        String configPath = new URI(StartupProperties.get().getProperty("config.store.uri")).getPath();
-        String location = configPath + "-" + getClass().getName();
-        StartupProperties.get().setProperty("config.store.uri", location);
-        FileUtils.deleteDirectory(new File(location));
-
-        cleanupStore();
-        String listeners = StartupProperties.get().getProperty("configstore.listeners");
-        listeners = listeners.replace("org.apache.falcon.service.SharedLibraryHostingService", "");
-        listeners = listeners.replace("org.apache.falcon.service.FeedSLAMonitoringService", "");
-        StartupProperties.get().setProperty("configstore.listeners", listeners);
-        store = ConfigurationStore.get();
-        store.init();
-
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_2);
-
-    }
-    @BeforeMethod
-    public void setUp() throws FalconException{
-        cleanupStore();
-        createClusters();
-    }
-
-    @AfterMethod
-    public void print() {
-        System.out.printf("%s%n", FeedLocationStore.get().store);
-    }
-
-    @Test
-    public void testOnAddSameLocation() throws FalconException{
-        Feed f1 = createFeed("f1SameLocations");
-        int initialSize = FeedLocationStore.get().store.getSize();
-        f1.getLocations().getLocations().add(createLocation(LocationType.DATA,
-                "/projects/cas/data/hourly/2014/09/09/09"));
-        f1.getLocations().getLocations().add(createLocation(LocationType.STATS,
-                "/projects/cas/stats/hourly/2014/09/09/09"));
-
-        Feed f2 = createFeed("f2SameLocations");
-        f2.getLocations().getLocations().add(createLocation(LocationType.STATS,
-                "/projects/cas/data/hourly/2014/09/09/09"));
-        f2.getLocations().getLocations().add(createLocation(LocationType.DATA,
-                "/projects/cas/stats/hourly/2014/09/09/09"));
-
-        store.publish(EntityType.FEED, f1);
-        store.publish(EntityType.FEED, f2);
-        int finalSize = FeedLocationStore.get().store.getSize();
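-        // 2 feeds x 2 feed-level locations, each apparently indexed once per blank cluster (2 of them) = 8 new entries.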
-        Assert.assertEquals(finalSize - initialSize, 8);
-    }
-
-    @Test
-    public void testOnRemove() throws FalconException{
-        int initialSize = FeedLocationStore.get().store.getSize();
-
-        Feed f1 = createFeed("f1ForRemove");
-        f1.getLocations().getLocations().add(createLocation(LocationType.DATA,
-                "/projects/cas/data/hourly/2014/09/09/09"));
-        f1.getLocations().getLocations().add(createLocation(LocationType.STATS,
-                "/projects/cas/data/hourly/2014/09/09/09"));
-
-        store.publish(EntityType.FEED, f1);
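-        // 1 feed x 2 locations x 2 blank clusters = 4 entries, all of which should disappear on remove.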
-        Assert.assertEquals(FeedLocationStore.get().store.getSize() - initialSize, 4);
-        store.remove(EntityType.FEED, "f1ForRemove");
-        Assert.assertEquals(FeedLocationStore.get().store.getSize(), initialSize);
-
-    }
-
-
-    @Test
-    public void testOnChange() throws FalconException{
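-        // No assertions here: the test passes as long as the update and the subsequent publish are handled by the listener without errors.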
-        Feed f1 = createFeed("f1");
-        f1.getLocations().getLocations().add(createLocation(LocationType.DATA,
-                "/projects/cas/data/hourly/2014/09/09/09"));
-        store.publish(EntityType.FEED, f1);
-
-        Feed f2 = createFeed("f1");
-        f2.getLocations().getLocations().add(createLocation(LocationType.DATA,
-                "/projects/cas/data/monthly"));
-        store.initiateUpdate(f2);
-        store.update(EntityType.FEED, f2);
-        store.cleanupUpdateInit();
-
-        Feed f3 = createFeed("f2");
-        f3.getLocations().getLocations().add(createLocation(LocationType.STATS,
-                "/projects/cas/data/hourly/2014/09/09/09"));
-        store.publish(EntityType.FEED, f3);
-
-    }
-
-    @Test
-    public void testWithClusterLocations() throws FalconException {
-        Feed f = createFeedWithClusterLocations("clusterFeed");
-        int initialSize = FeedLocationStore.get().store.getSize();
-        store.publish(EntityType.FEED, f);
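-        // Only the cluster-level locations appear to be indexed: 2 clusters x 3 locations = 6 entries; feed-level defaults are overridden.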
-        Assert.assertEquals(FeedLocationStore.get().store.getSize() - initialSize, 6);
-        store.remove(EntityType.FEED, "clusterFeed");
-        Assert.assertEquals(FeedLocationStore.get().store.getSize(), initialSize);
-    }
-
-
-    @Test
-    public void testFindWithRegularExpression() throws FalconException {
-        Feed f = createFeed("findUsingRegexFeed");
-        f.getLocations().getLocations().add(createLocation(LocationType.DATA,
-                "/falcon/test/input/${YEAR}/${MONTH}/${DAY}/${HOUR}"));
-        store.publish(EntityType.FEED, f);
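-        // A concrete instance path should match the templated location through the regex-based lookup.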
-        Assert.assertNotNull(FeedLocationStore.get().store.find("/falcon/test/input/2014/12/12/23",
-                new FalconRadixUtils.FeedRegexAlgorithm()));
-    }
-
-    @Test
-    public void testAddCatalogStorageFeeds() throws FalconException {
-        // This test ensures that catalog (table-backed) feeds are ignored by FeedLocationStore.
-        Feed f = createCatalogFeed("catalogFeed");
-        int initialSize = FeedLocationStore.get().store.getSize();
-        store.publish(EntityType.FEED, f);
-        Assert.assertEquals(FeedLocationStore.get().store.getSize(), initialSize);
-    }
-
-    private Feed createCatalogFeed(String name) {
-        Feed f = new Feed();
-        f.setName(name);
-        f.setClusters(createBlankClusters());
-        f.setTable(new CatalogTable());
-        return f;
-    }
-
-    private Feed createFeed(String name){
-        Feed f = new Feed();
-        Locations locations = new Locations();
-        f.setLocations(locations);
-        f.setName(name);
-        f.setClusters(createBlankClusters());
-        return f;
-    }
-
-
-    private Feed createFeedWithClusterLocations(String name) {
-        Feed f = new Feed();
-        f.setLocations(new Locations());
-        f.getLocations().getLocations().add(createLocation(LocationType.DATA, "/projects/cas/data"));
-        f.getLocations().getLocations().add(createLocation(LocationType.STATS, "/projects/cas/stats"));
-        f.getLocations().getLocations().add(createLocation(LocationType.META, "/projects/cas/meta"));
-        f.setName(name);
-        f.setClusters(createClustersWithLocations());
-        return f;
-    }
-
-    private Location createLocation(LocationType type, String path){
-        Location location = new Location();
-        location.setPath(path);
-        location.setType(type);
-        return location;
-    }
-
-    protected void cleanupStore() throws FalconException {
-        store = ConfigurationStore.get();
-        for (EntityType type : EntityType.values()) {
-            Collection<String> entities = store.getEntities(type);
-            for (String entity : entities) {
-                store.remove(type, entity);
-            }
-        }
-    }
-
-    private Clusters createClustersWithLocations() {
-        Clusters clusters = new Clusters();
-        Cluster cluster1 = new Cluster();
-        cluster1.setName("cluster1WithLocations");
-        cluster1.setLocations(new Locations());
-        cluster1.getLocations().getLocations().add(createLocation(LocationType.DATA, "/projects/cas/cluster1/data"));
-        cluster1.getLocations().getLocations().add(createLocation(LocationType.STATS, "/projects/cas/cluster1/stats"));
-        cluster1.getLocations().getLocations().add(createLocation(LocationType.META, "/projects/cas/cluster1/meta"));
-
-        Cluster cluster2 = new Cluster();
-        cluster2.setName("cluster2WithLocations");
-        cluster2.setLocations(new Locations());
-        cluster2.getLocations().getLocations().add(createLocation(LocationType.DATA, "/projects/cas/cluster2/data"));
-        cluster2.getLocations().getLocations().add(createLocation(LocationType.STATS, "/projects/cas/cluster2/stats"));
-        cluster2.getLocations().getLocations().add(createLocation(LocationType.META, "/projects/cas/cluster2/meta"));
-
-        clusters.getClusters().add(cluster1);
-        clusters.getClusters().add(cluster2);
-
-        return clusters;
-    }
-
-    private Clusters createBlankClusters() {
-        Clusters clusters = new Clusters();
-
-        Cluster cluster = new Cluster();
-        cluster.setName("blankCluster1");
-        clusters.getClusters().add(cluster);
-
-        Cluster cluster2 = new Cluster();
-        cluster2.setName("blankCluster2");
-        clusters.getClusters().add(cluster2);
-
-        return clusters;
-    }
-
-    private void createClusters() throws FalconException {
-        String[] clusterNames = {"cluster1WithLocations", "cluster2WithLocations", "blankCluster1", "blankCluster2"};
-        for (String name : clusterNames) {
-            org.apache.falcon.entity.v0.cluster.Cluster cluster = new org.apache.falcon.entity.v0.cluster.Cluster();
-            cluster.setName(name);
-            cluster.setColo("default");
-            store.publish(EntityType.CLUSTER, cluster);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java b/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
deleted file mode 100644
index 23f69d7..0000000
--- a/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
+++ /dev/null
@@ -1,407 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.feed.Argument;
-import org.apache.falcon.entity.v0.feed.Arguments;
-import org.apache.falcon.entity.v0.feed.Clusters;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Extract;
-import org.apache.falcon.entity.v0.feed.ExtractMethod;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.FieldsType;
-import org.apache.falcon.entity.v0.feed.FieldIncludeExclude;
-import org.apache.falcon.entity.v0.feed.Import;
-import org.apache.falcon.entity.v0.feed.MergeType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import java.util.List;
-import java.util.Set;
-
-/**
- * Entity graph tests.
- */
-public class EntityGraphTest extends AbstractTestBase {
-
-    private ConfigurationStore store = ConfigurationStore.get();
-
-    private EntityGraph graph = EntityGraph.get();
-
-    @Test
-    public void testOnAdd() throws Exception {
-
-        Process process = new Process();
-        process.setName("p1");
-        Cluster cluster = new Cluster();
-        cluster.setName("c1");
-        cluster.setColo("1");
-        Feed f1 = addInput(process, "f1", cluster);
-        Feed f2 = addInput(process, "f2", cluster);
-        Feed f3 = addOutput(process, "f3", cluster);
-        Feed f4 = addOutput(process, "f4", cluster);
-        org.apache.falcon.entity.v0.process.Cluster processCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        processCluster.setName("c1");
-        process.setClusters(new org.apache.falcon.entity.v0.process.Clusters());
-        process.getClusters().getClusters().add(processCluster);
-
-        store.publish(EntityType.CLUSTER, cluster);
-        store.publish(EntityType.FEED, f1);
-        store.publish(EntityType.FEED, f2);
-        store.publish(EntityType.FEED, f3);
-        store.publish(EntityType.FEED, f4);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<Entity> entities = graph.getDependents(process);
-        Assert.assertEquals(entities.size(), 5);
-        Assert.assertTrue(entities.contains(cluster));
-        Assert.assertTrue(entities.contains(f1));
-        Assert.assertTrue(entities.contains(f2));
-        Assert.assertTrue(entities.contains(f3));
-        Assert.assertTrue(entities.contains(f4));
-
-        entities = graph.getDependents(f1);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(process));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(f2);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(process));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(f3);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(process));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(f4);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(process));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(cluster);
-        Assert.assertEquals(entities.size(), 5);
-        Assert.assertTrue(entities.contains(process));
-        Assert.assertTrue(entities.contains(f1));
-        Assert.assertTrue(entities.contains(f2));
-        Assert.assertTrue(entities.contains(f3));
-        Assert.assertTrue(entities.contains(f4));
-    }
-
-    private Feed addInput(Process process, String feed, Cluster cluster) {
-        if (process.getInputs() == null) {
-            process.setInputs(new Inputs());
-        }
-        Inputs inputs = process.getInputs();
-        Input input = new Input();
-        input.setFeed(feed);
-        inputs.getInputs().add(input);
-        Feed f1 = new Feed();
-        f1.setName(feed);
-        Clusters clusters = new Clusters();
-        f1.setClusters(clusters);
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                new org.apache.falcon.entity.v0.feed.Cluster();
-        feedCluster.setName(cluster.getName());
-        clusters.getClusters().add(feedCluster);
-        return f1;
-    }
-
-    private Feed addFeedImport(String feed, Cluster cluster, Datasource ds) {
-
-        Feed f1 = new Feed();
-        f1.setName(feed);
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                new org.apache.falcon.entity.v0.feed.Cluster();
-        feedCluster.setName(cluster.getName());
-        feedCluster.setType(ClusterType.SOURCE);
-        Clusters clusters = new Clusters();
-        clusters.getClusters().add(feedCluster);
-        f1.setClusters(clusters);
-
-        Import imp = getAnImport(MergeType.SNAPSHOT, ds);
-        f1.getClusters().getClusters().get(0).setImport(imp);
-        return f1;
-    }
-
-    private Import getAnImport(MergeType mergeType, Datasource ds) {
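-        // Build an import definition: a full extract with the given merge policy, two included fields and two extra arguments.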
-        Extract extract = new Extract();
-        extract.setType(ExtractMethod.FULL);
-        extract.setMergepolicy(mergeType);
-
-        FieldsType fields = new FieldsType();
-        FieldIncludeExclude fieldInclude = new FieldIncludeExclude();
-        fieldInclude.getFields().add("id");
-        fieldInclude.getFields().add("name");
-        fields.setIncludes(fieldInclude);
-
-        org.apache.falcon.entity.v0.feed.Datasource source = new org.apache.falcon.entity.v0.feed.Datasource();
-        source.setName(ds.getName());
-        source.setTableName("test-table");
-        source.setExtract(extract);
-        source.setFields(fields);
-
-        Argument a1 = new Argument();
-        a1.setName("--split_by");
-        a1.setValue("id");
-        Argument a2 = new Argument();
-        a2.setName("--num-mappers");
-        a2.setValue("2");
-        Arguments args = new Arguments();
-        List<Argument> argList = args.getArguments();
-        argList.add(a1);
-        argList.add(a2);
-
-        Import imp = new Import();
-        imp.setSource(source);
-        imp.setArguments(args);
-        return imp;
-    }
-
-    private void attachInput(Process process, Feed feed) {
-        if (process.getInputs() == null) {
-            process.setInputs(new Inputs());
-        }
-        Inputs inputs = process.getInputs();
-        Input input = new Input();
-        input.setFeed(feed.getName());
-        inputs.getInputs().add(input);
-    }
-
-    private Feed addOutput(Process process, String feed, Cluster cluster) {
-        if (process.getOutputs() == null) {
-            process.setOutputs(new Outputs());
-        }
-        Outputs outputs = process.getOutputs();
-        Output output = new Output();
-        output.setFeed(feed);
-        outputs.getOutputs().add(output);
-        Feed f1 = new Feed();
-        f1.setName(feed);
-        Clusters clusters = new Clusters();
-        f1.setClusters(clusters);
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                new org.apache.falcon.entity.v0.feed.Cluster();
-        feedCluster.setName(cluster.getName());
-        clusters.getClusters().add(feedCluster);
-        return f1;
-    }
-
-    @Test
-    public void testOnRemove() throws Exception {
-        Process process = new Process();
-        process.setName("rp1");
-        Cluster cluster = new Cluster();
-        cluster.setName("rc1");
-        cluster.setColo("2");
-        org.apache.falcon.entity.v0.process.Cluster processCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        processCluster.setName("rc1");
-        process.setClusters(new org.apache.falcon.entity.v0.process.Clusters());
-        process.getClusters().getClusters().add(processCluster);
-
-        store.publish(EntityType.CLUSTER, cluster);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<Entity> entities = graph.getDependents(process);
-        Assert.assertEquals(entities.size(), 1);
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(cluster);
-        Assert.assertEquals(entities.size(), 1);
-        Assert.assertTrue(entities.contains(process));
-
-        store.remove(EntityType.PROCESS, process.getName());
-        entities = graph.getDependents(cluster);
-        Assert.assertNull(entities);
-
-        entities = graph.getDependents(process);
-        Assert.assertNull(entities);
-    }
-
-    @Test
-    public void testOnRemove2() throws Exception {
-
-        Process p1 = new Process();
-        p1.setName("ap1");
-        Process p2 = new Process();
-        p2.setName("ap2");
-        Cluster cluster = new Cluster();
-        cluster.setName("ac1");
-        cluster.setColo("3");
-        Feed f1 = addInput(p1, "af1", cluster);
-        Feed f3 = addOutput(p1, "af3", cluster);
-        Feed f2 = addOutput(p2, "af2", cluster);
-        attachInput(p2, f3);
-        org.apache.falcon.entity.v0.process.Cluster processCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        processCluster.setName("ac1");
-        p1.setClusters(new org.apache.falcon.entity.v0.process.Clusters());
-        p1.getClusters().getClusters().add(processCluster);
-        processCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        processCluster.setName("ac1");
-        p2.setClusters(new org.apache.falcon.entity.v0.process.Clusters());
-        p2.getClusters().getClusters().add(processCluster);
-
-        store.publish(EntityType.CLUSTER, cluster);
-        store.publish(EntityType.FEED, f1);
-        store.publish(EntityType.FEED, f2);
-        store.publish(EntityType.FEED, f3);
-        store.publish(EntityType.PROCESS, p1);
-        store.publish(EntityType.PROCESS, p2);
-
-        Set<Entity> entities = graph.getDependents(p1);
-        Assert.assertEquals(entities.size(), 3);
-        Assert.assertTrue(entities.contains(cluster));
-        Assert.assertTrue(entities.contains(f1));
-        Assert.assertTrue(entities.contains(f3));
-
-        entities = graph.getDependents(p2);
-        Assert.assertEquals(entities.size(), 3);
-        Assert.assertTrue(entities.contains(cluster));
-        Assert.assertTrue(entities.contains(f2));
-        Assert.assertTrue(entities.contains(f3));
-
-        entities = graph.getDependents(f1);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(p1));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(f2);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(p2));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(f3);
-        Assert.assertEquals(entities.size(), 3);
-        Assert.assertTrue(entities.contains(p2));
-        Assert.assertTrue(entities.contains(p1));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(cluster);
-        Assert.assertEquals(entities.size(), 5);
-        Assert.assertTrue(entities.contains(p1));
-        Assert.assertTrue(entities.contains(p2));
-        Assert.assertTrue(entities.contains(f1));
-        Assert.assertTrue(entities.contains(f2));
-        Assert.assertTrue(entities.contains(f3));
-
-        store.remove(EntityType.PROCESS, p2.getName());
-        store.remove(EntityType.FEED, f2.getName());
-
-        entities = graph.getDependents(p1);
-        Assert.assertEquals(entities.size(), 3);
-        Assert.assertTrue(entities.contains(cluster));
-        Assert.assertTrue(entities.contains(f1));
-        Assert.assertTrue(entities.contains(f3));
-
-        entities = graph.getDependents(p2);
-        Assert.assertNull(entities);
-
-        entities = graph.getDependents(f1);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(p1));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(f2);
-        Assert.assertNull(entities);
-
-        entities = graph.getDependents(f3);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(p1));
-        Assert.assertTrue(entities.contains(cluster));
-
-        entities = graph.getDependents(cluster);
-        Assert.assertEquals(entities.size(), 3);
-        Assert.assertTrue(entities.contains(p1));
-        Assert.assertTrue(entities.contains(f1));
-        Assert.assertTrue(entities.contains(f3));
-    }
-
-    @Test
-    public void testOnChange() throws Exception {
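-        // Placeholder: dependency graph updates on entity change are not covered by this test.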
-    }
-
-    @Test
-    public void testOnAddImport() throws Exception {
-
-        Datasource ds = new Datasource();
-        ds.setName("test-db");
-        ds.setColo("c1");
-
-        Cluster cluster = new Cluster();
-        cluster.setName("ci1");
-        cluster.setColo("c1");
-
-        Feed f1 = addFeedImport("fi1", cluster, ds);
-
-        store.publish(EntityType.CLUSTER, cluster);
-        store.publish(EntityType.DATASOURCE, ds);
-        store.publish(EntityType.FEED, f1);
-
-        Set<Entity> entities = graph.getDependents(cluster);
-        Assert.assertEquals(entities.size(), 1);
-        Assert.assertTrue(entities.contains(f1));
-
-        entities = graph.getDependents(ds);
-        Assert.assertEquals(entities.size(), 1);
-        Assert.assertTrue(entities.contains(f1));
-
-        entities = graph.getDependents(f1);
-        Assert.assertEquals(entities.size(), 2);
-        Assert.assertTrue(entities.contains(cluster));
-        Assert.assertTrue(entities.contains(ds));
-
-        store.remove(EntityType.FEED, "fi1");
-        store.remove(EntityType.DATASOURCE, "test-db");
-        store.remove(EntityType.CLUSTER, "ci1");
-    }
-
-    @Test
-    public void testOnRemoveDatasource() throws Exception {
-
-        Datasource ds = new Datasource();
-        ds.setName("test-db");
-        ds.setColo("c1");
-
-        Cluster cluster = new Cluster();
-        cluster.setName("ci1");
-        cluster.setColo("c1");
-
-        Feed f1 = addFeedImport("fi1", cluster, ds);
-
-        store.publish(EntityType.CLUSTER, cluster);
-        store.publish(EntityType.DATASOURCE, ds);
-        store.publish(EntityType.FEED, f1);
-
-        store.remove(EntityType.DATASOURCE, "test-db");
-
-        Set<Entity> entities = graph.getDependents(f1);
-        Assert.assertEquals(entities.size(), 1);
-        Assert.assertTrue(entities.contains(cluster));
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/expression/ExpressionHelperTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/expression/ExpressionHelperTest.java b/common/src/test/java/org/apache/falcon/expression/ExpressionHelperTest.java
deleted file mode 100644
index da5dbca..0000000
--- a/common/src/test/java/org/apache/falcon/expression/ExpressionHelperTest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.expression;
-
-import org.apache.falcon.FalconException;
-import org.testng.Assert;
-import org.testng.annotations.BeforeTest;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.text.ParseException;
-import java.util.Date;
-
-/**
- * Unit test cases for EL Expressions.
- */
-public class ExpressionHelperTest {
-
-    private ExpressionHelper expressionHelper = ExpressionHelper.get();
-
-    @BeforeTest
-    public void init() throws ParseException {
-        Date referenceDate = ExpressionHelper.FORMATTER.get().parse("2015-02-01T00:00Z");
-        expressionHelper.setReferenceDate(referenceDate);
-    }
-
-    @Test(dataProvider = "ElExpressions")
-    public void testStartOffset(String expression, String expectedDateStr) throws FalconException {
-        Date evalDate = expressionHelper.evaluate(expression, Date.class);
-        String evalDateStr = ExpressionHelper.FORMATTER.get().format(evalDate);
-        Assert.assertEquals(evalDateStr, expectedDateStr);
-    }
-
-
-    @DataProvider(name = "ElExpressions")
-    public Object[][] createOffsets() {
-        return new Object[][] {
-            {"now(-10,-30)", "2015-01-31T13:30Z"},
-            {"now(10,-30)", "2015-02-01T09:30Z"},
-
-            {"today(0,0)", "2015-02-01T00:00Z"},
-            {"today(-1,0)", "2015-01-31T23:00Z"},
-            {"yesterday(0,0)", "2015-01-31T00:00Z"},
-            {"yesterday(-1,0)", "2015-01-30T23:00Z"},
-            {"yesterday(1,30)", "2015-01-31T01:30Z"},
-
-            {"currentMonth(2,0,0)", "2015-02-03T00:00Z"},
-            {"currentMonth(-2,1,30)", "2015-01-30T01:30Z"},
-            {"lastMonth(3,0,0)", "2015-01-04T00:00Z"},
-            {"lastMonth(-3,0,0)", "2014-12-29T00:00Z"},
-
-            {"currentWeek('THU',0,0)", "2015-01-29T00:00Z"},
-            {"currentWeek('SUN',0,0)", "2015-02-01T00:00Z"},
-            {"lastWeek('THU',0,0)", "2015-01-22T00:00Z"},
-            {"lastWeek('SUN',0,0)", "2015-01-25T00:00Z"},
-
-            {"currentYear(1,1,0,0)", "2015-02-02T00:00Z"},
-            {"currentYear(-1,1,0,0)", "2014-12-02T00:00Z"},
-            {"lastYear(1,1,0,0)", "2014-02-02T00:00Z"},
-            {"lastYear(-1,1,0,0)", "2013-12-02T00:00Z"},
-
-            // latest and future will return the reference time
-            {"latest(0)", "2015-02-01T00:00Z"},
-            {"latest(-1)", "2015-02-01T00:00Z"},
-            {"future(0,0)", "2015-02-01T00:00Z"},
-            {"future(1,0)", "2015-02-01T00:00Z"},
-        };
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/group/FeedGroupMapTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/group/FeedGroupMapTest.java b/common/src/test/java/org/apache/falcon/group/FeedGroupMapTest.java
deleted file mode 100644
index a6c52e3..0000000
--- a/common/src/test/java/org/apache/falcon/group/FeedGroupMapTest.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.group;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.util.Map;
-
-/**
- * Feed group map tests.
- */
-public class FeedGroupMapTest extends AbstractTestBase {
-    private static Cluster cluster;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        cluster = (Cluster) EntityType.CLUSTER
-                .getUnmarshaller()
-                .unmarshal(
-                        FeedGroupMapTest.class
-                                .getResourceAsStream("/config/cluster/cluster-0.1.xml"));
-    }
-
-    @BeforeMethod
-    public void cleanup() throws Exception {
-        cleanupStore();
-    }
-
-    @Test
-    public void testOnAdd() throws FalconException, JAXBException {
-        getStore().publish(EntityType.CLUSTER, cluster);
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("f1");
-        feed1.setGroups("group1,group2,group3");
-        Location location = new Location();
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location.setType(LocationType.DATA);
-        feed1.setLocations(new Locations());
-        feed1.getLocations().getLocations().add(location);
-        getStore().publish(EntityType.FEED, feed1);
-        Map<String, FeedGroup> groupMapping = FeedGroupMap.get()
-                .getGroupsMapping();
-
-        FeedGroup group = groupMapping.get("group1");
-        Assert.assertEquals(group.getName(), "group1");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-        assertFields(group, feed1);
-
-        group = groupMapping.get("group2");
-        Assert.assertEquals(group.getName(), "group2");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-        assertFields(group, feed1);
-
-        group = groupMapping.get("group3");
-        Assert.assertEquals(group.getName(), "group3");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-        assertFields(group, feed1);
-
-        Feed feed2 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-
-        feed2.setName("f2");
-        feed2.setGroups("group1,group5,group3");
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/${MONTH}/${DAY}/ad2");
-        location.setType(LocationType.DATA);
-        feed2.setLocations(new Locations());
-        feed2.getLocations().getLocations().add(location);
-        getStore().publish(EntityType.FEED, feed2);
-        groupMapping = FeedGroupMap.get().getGroupsMapping();
-
-        group = groupMapping.get("group1");
-        Assert.assertEquals(group.getName(), "group1");
-        Assert.assertEquals(group.getFeeds().size(), 2);
-        assertFields(group, feed2);
-
-        group = groupMapping.get("group2");
-        Assert.assertEquals(group.getName(), "group2");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-        assertFields(group, feed2);
-
-        group = groupMapping.get("group3");
-        Assert.assertEquals(group.getName(), "group3");
-        Assert.assertEquals(group.getFeeds().size(), 2);
-        assertFields(group, feed2);
-
-        group = groupMapping.get("group5");
-        Assert.assertEquals(group.getName(), "group5");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-        assertFields(group, feed2);
-
-    }
-
-    @Test
-    public void testOnRemove() throws FalconException, JAXBException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("f1");
-        getStore().publish(EntityType.CLUSTER, cluster);
-        feed1.setGroups("group7,group8,group9");
-        Location location = new Location();
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location.setType(LocationType.DATA);
-        feed1.setLocations(new Locations());
-        feed1.getLocations().getLocations().add(location);
-        getStore().publish(EntityType.FEED, feed1);
-
-        Feed feed2 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed2.setName("f2");
-        feed2.setGroups("group7,group8,group10");
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/${MONTH}/${DAY}/ad2");
-        location.setType(LocationType.DATA);
-        feed2.setLocations(new Locations());
-        feed2.getLocations().getLocations().add(location);
-        getStore().publish(EntityType.FEED, feed2);
-
-        Map<String, FeedGroup> groupMapping = FeedGroupMap.get()
-                .getGroupsMapping();
-
-        getStore().remove(EntityType.FEED, "f2");
-
-        FeedGroup group = groupMapping.get("group7");
-        Assert.assertEquals(group.getName(), "group7");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-
-        group = groupMapping.get("group8");
-        Assert.assertEquals(group.getName(), "group8");
-        Assert.assertEquals(group.getFeeds().size(), 1);
-
-        group = groupMapping.get("group10");
-        Assert.assertNull(group);
-
-        getStore().remove(EntityType.FEED, "f1");
-
-        group = groupMapping.get("group7");
-        Assert.assertNull(group);
-
-        group = groupMapping.get("group8");
-        Assert.assertNull(group);
-
-        group = groupMapping.get("group9");
-        Assert.assertNull(group);
-
-    }
-
-    @Test
-    public void testNullGroup() throws FalconException, JAXBException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("f5" + System.currentTimeMillis());
-        getStore().publish(EntityType.CLUSTER, cluster);
-        feed1.setGroups(null);
-        Location location = new Location();
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location.setType(LocationType.DATA);
-        feed1.setLocations(new Locations());
-        feed1.getLocations().getLocations().add(location);
-        getStore().publish(EntityType.FEED, feed1);
-
-    }
-
-    private void assertFields(FeedGroup group, Feed feed) {
-        Assert.assertEquals(group.getFrequency(), feed.getFrequency());
-        Assert.assertEquals(group.getDatePattern(),
-                "[${DAY}, ${MONTH}, ${YEAR}]");
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/hadoop/HadoopClientFactoryTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/hadoop/HadoopClientFactoryTest.java b/common/src/test/java/org/apache/falcon/hadoop/HadoopClientFactoryTest.java
deleted file mode 100644
index 6ef2710..0000000
--- a/common/src/test/java/org/apache/falcon/hadoop/HadoopClientFactoryTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hadoop;
-
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.net.URI;
-
-/**
- * Unit tests for HadoopClientFactory that doles out FileSystem handles.
- */
-public class HadoopClientFactoryTest {
-
-    private EmbeddedCluster embeddedCluster;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        embeddedCluster = EmbeddedCluster.newCluster(getClass().getSimpleName());
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        if (embeddedCluster != null) {
-            embeddedCluster.shutdown();
-        }
-    }
-
-    @Test
-    public void testGet() throws Exception {
-        HadoopClientFactory clientFactory = HadoopClientFactory.get();
-        Assert.assertNotNull(clientFactory);
-    }
-
-    @Test (enabled = false) // TODO: the conf impersonates the same user, so this negative case cannot fail as intended
-    public void testCreateFileSystemWithSameUser() {
-        String user = System.getProperty("user.name");
-        CurrentUser.authenticate(user);
-        try {
-            Configuration conf = embeddedCluster.getConf();
-            URI uri = new URI(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
-            Assert.assertNotNull(uri);
-            HadoopClientFactory.get().createFileSystem(CurrentUser.getProxyUGI(), uri, conf);
-            Assert.fail("Impersonation should have failed.");
-        } catch (Exception e) {
-            Assert.assertEquals(e.getCause().getClass(), RemoteException.class);
-        }
-    }
-
-    @Test
-    public void testCreateFileSystem() throws Exception {
-        Configuration conf = embeddedCluster.getConf();
-
-        UserGroupInformation.setConfiguration(conf);
-        UserGroupInformation realUser = UserGroupInformation.createUserForTesting(
-                FalconTestUtil.TEST_USER_2, new String[]{"testgroup"});
-        UserGroupInformation.createProxyUserForTesting("proxyuser", realUser, new String[]{"proxygroup"});
-
-        URI uri = new URI(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
-        Assert.assertNotNull(uri);
-        FileSystem fs = HadoopClientFactory.get().createFileSystem(realUser, uri, conf);
-        Assert.assertNotNull(fs);
-    }
-
-    @Test
-    public void testCreateFileSystemWithUser() throws Exception {
-        Configuration conf = embeddedCluster.getConf();
-
-        UserGroupInformation realUser = UserGroupInformation.createUserForTesting(
-                FalconTestUtil.TEST_USER_2, new String[]{"testgroup"});
-        UserGroupInformation.createProxyUserForTesting("proxyuser", realUser, new String[]{"proxygroup"});
-        UserGroupInformation.setConfiguration(conf);
-
-        URI uri = new URI(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
-        Assert.assertNotNull(uri);
-
-        CurrentUser.authenticate(System.getProperty("user.name"));
-        FileSystem fs = HadoopClientFactory.get().createFileSystem(CurrentUser.getProxyUGI(), uri, conf);
-        Assert.assertNotNull(fs);
-    }
-}
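
For reference, a minimal usage sketch of the factory exercised by the tests above. It assumes the Falcon common module and a reachable HDFS endpoint; the endpoint URI is illustrative, not taken from the patch, and the imports match those of the test class.

    // Assumes falcon-common and hadoop-common on the classpath (same imports as the test above).
    Configuration conf = new Configuration();
    conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, "hdfs://localhost:8020"); // illustrative endpoint
    URI uri = new URI(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));

    // Authenticate the current user and obtain a FileSystem as that user's proxy UGI,
    // mirroring testCreateFileSystemWithUser() above.
    CurrentUser.authenticate(System.getProperty("user.name"));
    FileSystem fs = HadoopClientFactory.get().createFileSystem(CurrentUser.getProxyUGI(), uri, conf);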


[40/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/ProcessHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/ProcessHelper.java b/common/src/main/java/org/apache/falcon/entity/ProcessHelper.java
deleted file mode 100644
index bbfca68..0000000
--- a/common/src/main/java/org/apache/falcon/entity/ProcessHelper.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.resource.SchedulableEntityInstance;
-
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Helper methods for accessing process members.
- */
-public final class ProcessHelper {
-
-    private ProcessHelper() {}
-
-
-    public static Cluster getCluster(Process process, String clusterName) {
-        for (Cluster cluster : process.getClusters().getClusters()) {
-            if (cluster.getName().equals(clusterName)) {
-                return cluster;
-            }
-        }
-        return null;
-    }
-
-    public static String getProcessWorkflowName(String workflowName, String processName) {
-        return StringUtils.isEmpty(workflowName) ? processName + "-workflow" : workflowName;
-    }
-
-    public static Storage.TYPE getStorageType(org.apache.falcon.entity.v0.cluster.Cluster cluster,
-                                              Process process) throws FalconException {
-        Storage.TYPE storageType = Storage.TYPE.FILESYSTEM;
-        if (process.getInputs() == null && process.getOutputs() == null) {
-            return storageType;
-        }
-
-        if (process.getInputs() != null) {
-            for (Input input : process.getInputs().getInputs()) {
-                Feed feed = EntityUtil.getEntity(EntityType.FEED, input.getFeed());
-                storageType = FeedHelper.getStorageType(feed, cluster);
-                if (Storage.TYPE.TABLE == storageType) {
-                    break;
-                }
-            }
-        }
-
-        // If the input feeds' storage type is file system, check the storage type of the output feeds
-        if (process.getOutputs() != null && Storage.TYPE.FILESYSTEM == storageType) {
-            for (Output output : process.getOutputs().getOutputs()) {
-                Feed feed = EntityUtil.getEntity(EntityType.FEED, output.getFeed());
-                storageType = FeedHelper.getStorageType(feed, cluster);
-                if (Storage.TYPE.TABLE == storageType) {
-                    break;
-                }
-            }
-        }
-
-        return storageType;
-    }
-
-    private static void validateProcessInstance(Process process, Date instanceTime,
-                                                org.apache.falcon.entity.v0.cluster.Cluster cluster) {
-        //validate the cluster
-        Cluster processCluster = getCluster(process, cluster.getName());
-        if (processCluster == null) {
-            throw new IllegalArgumentException("Cluster provided: " + cluster.getName()
-                    + " is not a valid cluster for the process: " + process.getName());
-        }
-
-        // check if instanceTime is in validity range
-        if (instanceTime.before(processCluster.getValidity().getStart())
-                || !instanceTime.before(processCluster.getValidity().getEnd())) {
-            throw new IllegalArgumentException("Instance time provided: " + instanceTime
-                    + " is not in validity range of process: " + process.getName()
-                    + " on cluster: " + cluster.getName());
-        }
-
-        // check instanceTime is valid on the basis of startTime and frequency
-        Date nextInstance = EntityUtil.getNextStartTime(processCluster.getValidity().getStart(),
-                process.getFrequency(), process.getTimezone(), instanceTime);
-        if (!nextInstance.equals(instanceTime)) {
-            throw new IllegalArgumentException("Instance time provided: " + instanceTime
-                    + " for process: " + process.getName() + " is not a valid instance time on cluster: "
-                    + cluster.getName() + " on the basis of startDate and frequency");
-        }
-    }
-
-    /**
-     * Given a process instance, returns the feed instances which are used as input for this process instance.
-     *
-     * @param process            given process
-     * @param instanceTime       nominal time of the process instance
-     * @param cluster            - cluster for the process instance
-     * @param allowOptionalFeeds switch to indicate whether optional feeds should be considered in input feeds.
-     * @return Set of input feed instances which are consumed by the given process instance.
-     * @throws org.apache.falcon.FalconException
-     */
-    public static Set<SchedulableEntityInstance> getInputFeedInstances(Process process, Date instanceTime,
-               org.apache.falcon.entity.v0.cluster.Cluster cluster, boolean allowOptionalFeeds) throws FalconException {
-
-        // validate the inputs
-        validateProcessInstance(process, instanceTime, cluster);
-
-        Set<SchedulableEntityInstance> result = new HashSet<>();
-        if (process.getInputs() != null) {
-            ConfigurationStore store = ConfigurationStore.get();
-            for (Input i : process.getInputs().getInputs()) {
-                if (i.isOptional() && !allowOptionalFeeds) {
-                    continue;
-                }
-                Feed feed = store.get(EntityType.FEED, i.getFeed());
-                // inputStart is process instance time + (now - startTime)
-                ExpressionHelper evaluator = ExpressionHelper.get();
-                ExpressionHelper.setReferenceDate(instanceTime);
-                Date inputInstanceStartDate = evaluator.evaluate(i.getStart(), Date.class);
-                Date inputInstanceEndDate = evaluator.evaluate(i.getEnd(), Date.class);
-                List<Date> instanceTimes = EntityUtil.getEntityInstanceTimes(feed, cluster.getName(),
-                        inputInstanceStartDate, inputInstanceEndDate);
-                SchedulableEntityInstance instance;
-                for (Date time : instanceTimes) {
-                    instance = new SchedulableEntityInstance(feed.getName(), cluster.getName(), time, EntityType.FEED);
-                    instance.setTags(SchedulableEntityInstance.INPUT);
-                    result.add(instance);
-                }
-            }
-        }
-        return result;
-    }
-
-    public static Set<SchedulableEntityInstance> getOutputFeedInstances(Process process, Date instanceTime,
-                                        org.apache.falcon.entity.v0.cluster.Cluster cluster) throws FalconException {
-        Set<SchedulableEntityInstance> result = new HashSet<>();
-
-        // validate the inputs
-        validateProcessInstance(process, instanceTime, cluster);
-
-        if (process.getOutputs() != null && process.getOutputs().getOutputs() != null) {
-
-            ExpressionHelper.setReferenceDate(instanceTime);
-            ExpressionHelper evaluator = ExpressionHelper.get();
-            SchedulableEntityInstance candidate;
-            ConfigurationStore store = ConfigurationStore.get();
-            for (Output output : process.getOutputs().getOutputs()) {
-
-                Date outputInstance = evaluator.evaluate(output.getInstance(), Date.class);
-                // find the feed
-                Feed feed = store.get(EntityType.FEED, output.getFeed());
-                org.apache.falcon.entity.v0.feed.Cluster fCluster = FeedHelper.getCluster(feed, cluster.getName());
-                outputInstance = EntityUtil.getPreviousInstanceTime(fCluster.getValidity().getStart(),
-                        feed.getFrequency(), feed.getTimezone(), outputInstance);
-                candidate = new SchedulableEntityInstance(output.getFeed(), cluster.getName(), outputInstance,
-                        EntityType.FEED);
-                candidate.setTags(SchedulableEntityInstance.OUTPUT);
-                result.add(candidate);
-            }
-        }
-        return result;
-    }
-}
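
A hedged usage sketch of the two instance-resolution helpers above. It assumes the process, its feeds and the cluster are already registered in the ConfigurationStore; the entity names are hypothetical, and the nominal time must line up with the process start time and frequency (see validateProcessInstance).

    Process process = ConfigurationStore.get().get(EntityType.PROCESS, "sample-process");  // hypothetical name
    org.apache.falcon.entity.v0.cluster.Cluster cluster =
            ConfigurationStore.get().get(EntityType.CLUSTER, "primary-cluster");           // hypothetical name
    Date nominalTime = new Date(); // must be a valid instance time for the process

    // Feed instances this process instance consumes (optional inputs skipped) and produces.
    Set<SchedulableEntityInstance> inputs =
            ProcessHelper.getInputFeedInstances(process, nominalTime, cluster, false);
    Set<SchedulableEntityInstance> outputs =
            ProcessHelper.getOutputFeedInstances(process, nominalTime, cluster);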

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/Storage.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/Storage.java b/common/src/main/java/org/apache/falcon/entity/Storage.java
deleted file mode 100644
index 3dc8f67..0000000
--- a/common/src/main/java/org/apache/falcon/entity/Storage.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.fs.Path;
-
-import java.util.Date;
-import java.util.List;
-
-/**
- * An interface to encapsulate the storage for a given feed, which can be
- * expressed either as a path on the file system or as a table in a catalog.
- */
-public interface Storage extends Configurable {
-
-    String DOLLAR_EXPR_START_REGEX = "\\$\\{";
-    String QUESTION_EXPR_START_REGEX = "\\?\\{";
-    String EXPR_CLOSE_REGEX = "\\}";
-
-    /**
-     * URI Friendly expression.
-     */
-    String DOLLAR_EXPR_START_NORMALIZED = "_D__START_";
-    String EXPR_CLOSE_NORMALIZED = "_CLOSE_";
-
-    /**
-     * Enumeration for the various storage types.
-     */
-    enum TYPE {FILESYSTEM, TABLE}
-
-    /**
-     * Return the type of storage.
-     *
-     * @return storage type
-     */
-    TYPE getType();
-
-    /**
-     * Return the uri template.
-     *
-     * @return uri template
-     */
-    String getUriTemplate();
-
-    /**
-     * Return the uri template for a given location type.
-     *
-     * @param locationType type of location, applies only to filesystem type
-     * @return uri template
-     */
-    String getUriTemplate(LocationType locationType);
-
-    /**
-     * Check for equality of this instance against the one in question.
-     *
-     * @param toCompareAgainst instance to compare
-     * @return true if identical else false
-     * @throws FalconException an exception
-     */
-    boolean isIdentical(Storage toCompareAgainst) throws FalconException;
-
-    /**
-     * Check the permission on the storage, regarding owner/group/permission coming from ACL.
-     *
-     * @param acl the ACL defined in the entity.
-     * @throws FalconException if the permissions are not valid.
-     */
-    void validateACL(AccessControlList acl) throws FalconException;
-
-    /**
-     * Get Feed Listing for a feed between a date range.
-     */
-    List<FeedInstanceStatus> getListing(Feed feed, String cluster, LocationType locationType,
-                                        Date start, Date end) throws FalconException;
-
-
-    /**
-     * Checks the availability status for a given feed instance.
-     */
-    FeedInstanceStatus.AvailabilityStatus getInstanceAvailabilityStatus(Feed feed, String clusterName,
-                                                                        LocationType locationType,
-                                                                        Date instanceTime) throws FalconException;
-
-
-    /**
-     * Delete the instances of the feeds which are older than the retentionLimit specified.
-     *
-     * @param retentionLimit - retention limit of the feed e.g. hours(5).
-     * @param timeZone - timeZone for the feed definition.
-     * @param logFilePath - logFile to be used to record the deleted instances.
-     * @return - StringBuilder containing a comma-separated list of dates for the deleted instances.
-     * @throws FalconException
-     */
-    StringBuilder evict(String retentionLimit, String timeZone, Path logFilePath) throws FalconException;
-}
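
The *_NORMALIZED constants above hint at a round trip between EL-style templates and URI-safe strings. Below is a standalone, JDK-only illustration of that normalize/restore idiom using the same constants; it is a sketch of the technique, not Falcon's own implementation.

    public final class UriTemplateNormalizeDemo {
        // Same regex/normalized tokens as the Storage interface above.
        private static final String DOLLAR_EXPR_START_REGEX = "\\$\\{";
        private static final String EXPR_CLOSE_REGEX = "\\}";
        private static final String DOLLAR_EXPR_START_NORMALIZED = "_D__START_";
        private static final String EXPR_CLOSE_NORMALIZED = "_CLOSE_";

        public static void main(String[] args) {
            String template = "/data/clicks/${YEAR}/${MONTH}/${DAY}"; // illustrative template

            // Make the template safe to embed in a URI by replacing the ${ and } markers.
            String normalized = template
                    .replaceAll(DOLLAR_EXPR_START_REGEX, DOLLAR_EXPR_START_NORMALIZED)
                    .replaceAll(EXPR_CLOSE_REGEX, EXPR_CLOSE_NORMALIZED);
            System.out.println(normalized); // /data/clicks/_D__START_YEAR_CLOSE_/...

            // Restore the original expression markers.
            String restored = normalized
                    .replaceAll(DOLLAR_EXPR_START_NORMALIZED, "\\${")
                    .replaceAll(EXPR_CLOSE_NORMALIZED, "}");
            System.out.println(restored.equals(template)); // true
        }
    }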

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/WorkflowNameBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/WorkflowNameBuilder.java b/common/src/main/java/org/apache/falcon/entity/WorkflowNameBuilder.java
deleted file mode 100644
index c58be64..0000000
--- a/common/src/main/java/org/apache/falcon/entity/WorkflowNameBuilder.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.entity;
-
-import org.apache.falcon.Pair;
-import org.apache.falcon.Tag;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Convenient builder for workflow name.
- * @param <T>
- */
-public class WorkflowNameBuilder<T extends Entity> {
-    private static final String PREFIX = "FALCON";
-
-    private T entity;
-    private Tag tag;
-    private List<String> suffixes;
-
-    public WorkflowNameBuilder(T entity) {
-        this.entity = entity;
-    }
-
-    public void setTag(Tag tag) {
-        this.tag = tag;
-    }
-
-    public void setSuffixes(List<String> suffixes) {
-        this.suffixes = suffixes;
-    }
-
-    public WorkflowName getWorkflowName() {
-        return new WorkflowName(PREFIX, entity.getEntityType().name(),
-                tag == null ? null : tag.name(), entity.getName(),
-                suffixes == null ? new ArrayList<String>() : suffixes);
-    }
-
-    public Tag getWorkflowTag(String workflowName) {
-        return WorkflowName.getTagAndSuffixes(workflowName) == null ? null
-                : WorkflowName.getTagAndSuffixes(workflowName).first;
-    }
-
-    public String getWorkflowSuffixes(String workflowName) {
-        return WorkflowName.getTagAndSuffixes(workflowName) == null ? ""
-                : WorkflowName.getTagAndSuffixes(workflowName).second;
-    }
-
-    /**
-     * Workflow name.
-     */
-    public static class WorkflowName {
-        private static final String SEPARATOR = "_";
-        private static final Pattern WF_NAME_PATTERN;
-
-        private String prefix;
-        private String entityType;
-        private String tag;
-        private String entityName;
-        private List<String> suffixes;
-
-        static {
-            StringBuilder typePattern = new StringBuilder("(");
-            for (EntityType type : EntityType.values()) {
-                typePattern.append(type.name());
-                typePattern.append("|");
-            }
-            typePattern = typePattern.deleteCharAt(typePattern.length() - 1);
-            typePattern.append(")");
-            StringBuilder tagsPattern = new StringBuilder("(");
-            for (Tag tag : Tag.values()) {
-                tagsPattern.append(tag.name());
-                tagsPattern.append("|");
-            }
-            tagsPattern = tagsPattern.deleteCharAt(tagsPattern.length() - 1);
-            tagsPattern.append(")");
-
-            String name = "([a-zA-Z][\\-a-zA-Z0-9]*)";
-
-            String suffix = "([_A-Za-z0-9-.]*)";
-
-            String namePattern = PREFIX + SEPARATOR + typePattern + SEPARATOR + tagsPattern
-                    + SEPARATOR + name + suffix;
-
-            WF_NAME_PATTERN = Pattern.compile(namePattern);
-        }
-
-        public WorkflowName(String prefix, String entityType, String tag,
-                            String entityName, List<String> suffixes) {
-            this.prefix = prefix;
-            this.entityType = entityType;
-            this.tag = tag;
-            this.entityName = entityName;
-            this.suffixes = suffixes;
-        }
-
-        @Override
-        public String toString() {
-            StringBuilder builder = new StringBuilder();
-            builder.append(prefix).append(SEPARATOR).append(entityType)
-                    .append(tag == null ? "" : SEPARATOR + tag)
-                    .append(SEPARATOR).append(entityName);
-
-            for (String suffix : suffixes) {
-                builder.append(SEPARATOR).append(suffix);
-            }
-
-            return builder.toString();
-        }
-
-        public static Pair<Tag, String> getTagAndSuffixes(String workflowName) {
-            Matcher matcher = WF_NAME_PATTERN.matcher(workflowName);
-            if (matcher.matches()) {
-                matcher.reset();
-                if (matcher.find()) {
-                    String tag = matcher.group(2);
-                    String suffixes = matcher.group(4);
-                    return new Pair<>(Tag.valueOf(tag), suffixes);
-                }
-            }
-            return null;
-        }
-
-        public static Pair<String, EntityType> getEntityNameAndType(String workflowName) {
-            Matcher matcher = WF_NAME_PATTERN.matcher(workflowName);
-            if (matcher.matches()) {
-                matcher.reset();
-                if (matcher.find()) {
-                    String type = matcher.group(1);
-                    String name = matcher.group(3);
-                    return new Pair<>(name, EntityType.valueOf(type));
-                }
-            }
-            return null;
-        }
-    }
-}
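
The pattern built in the static block above encodes names of the shape FALCON_<TYPE>_<TAG>_<entityName><suffixes>. A standalone, JDK-only sketch of composing and parsing a name of that shape follows; the type/tag alternations are abbreviated and the entity name is made up for illustration.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class WorkflowNameDemo {
        public static void main(String[] args) {
            // Compose: FALCON + type + tag + entity name (+ optional suffixes), joined by '_'.
            String workflowName = String.join("_", "FALCON", "PROCESS", "DEFAULT", "sample-process");

            // Parse it back with a pattern of the same shape as WF_NAME_PATTERN above.
            Pattern p = Pattern.compile(
                    "FALCON_(PROCESS|FEED)_(DEFAULT|RETENTION|REPLICATION)_([a-zA-Z][\\-a-zA-Z0-9]*)([_A-Za-z0-9.-]*)");
            Matcher m = p.matcher(workflowName);
            if (m.matches()) {
                System.out.println("type=" + m.group(1));   // PROCESS
                System.out.println("tag=" + m.group(2));    // DEFAULT
                System.out.println("name=" + m.group(3));   // sample-process
                System.out.println("suffix=" + m.group(4)); // (empty)
            }
        }
    }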

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/common/FeedDataPath.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/common/FeedDataPath.java b/common/src/main/java/org/apache/falcon/entity/common/FeedDataPath.java
deleted file mode 100644
index 51568fb..0000000
--- a/common/src/main/java/org/apache/falcon/entity/common/FeedDataPath.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.entity.common;
-
-import java.util.Calendar;
-import java.util.regex.Pattern;
-
-/**
- * Helper to map feed path and the time component.
- */
-public final class FeedDataPath {
-
-    private FeedDataPath() {}
-
-    /**
-     * Standard variables for feed time components.
-     */
-    public enum VARS {
-        YEAR("([0-9]{4})", Calendar.YEAR, 4),
-        MONTH("(0[1-9]|1[0-2])", Calendar.MONTH, 2),
-        DAY("(0[1-9]|1[0-9]|2[0-9]|3[0-1])", Calendar.DAY_OF_MONTH, 2),
-        HOUR("([0-1][0-9]|2[0-4])", Calendar.HOUR_OF_DAY, 2),
-        MINUTE("([0-5][0-9]|60)", Calendar.MINUTE, 2);
-
-        private final Pattern pattern;
-        private final String valuePattern;
-        private final int calendarField;
-        private final int valueSize;
-
-        private VARS(String patternRegularExpression, int calField, int numDigits) {
-            pattern = Pattern.compile("\\$\\{" + name() + "\\}");
-            this.valuePattern = patternRegularExpression;
-            this.calendarField = calField;
-            this.valueSize = numDigits;
-        }
-
-        public String getValuePattern() {
-            return valuePattern;
-        }
-
-        public String regex() {
-            return pattern.pattern();
-        }
-
-        public int getCalendarField() {
-            return calendarField;
-        }
-
-        public int getValueSize() {
-            return valueSize;
-        }
-
-        public void setCalendar(Calendar cal, int value) {
-            if (this == MONTH) {
-                cal.set(calendarField, value - 1);
-            } else {
-                cal.set(calendarField, value);
-            }
-        }
-
-        public static VARS from(String str) {
-            for (VARS var : VARS.values()) {
-                if (var.pattern.matcher(str).matches()) {
-                    return var;
-                }
-            }
-            return null;
-        }
-    }
-
-    public static final Pattern PATTERN = Pattern.compile(VARS.YEAR.regex()
-            + "|" + VARS.MONTH.regex() + "|" + VARS.DAY.regex() + "|"
-            + VARS.HOUR.regex() + "|" + VARS.MINUTE.regex());
-}
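
Each VARS constant ties a ${VAR} placeholder to a value pattern and a Calendar field, so a materialized path can be mapped back to a time. A standalone, JDK-only sketch of that mapping for a YEAR/MONTH/DAY path; the path and template are illustrative.

    import java.util.Calendar;
    import java.util.TimeZone;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class FeedPathTimeDemo {
        public static void main(String[] args) {
            // Template /projects/clicks/${YEAR}/${MONTH}/${DAY} materialized as:
            String path = "/projects/clicks/2016/03/01";

            // Capture groups mirror the VARS.YEAR, VARS.MONTH and VARS.DAY value patterns above.
            Pattern p = Pattern.compile(
                    "/projects/clicks/([0-9]{4})/(0[1-9]|1[0-2])/(0[1-9]|1[0-9]|2[0-9]|3[0-1])");
            Matcher m = p.matcher(path);
            if (m.matches()) {
                Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
                cal.clear();
                cal.set(Calendar.YEAR, Integer.parseInt(m.group(1)));
                // Calendar.MONTH is zero-based, hence the "- 1" (the same adjustment VARS.setCalendar makes).
                cal.set(Calendar.MONTH, Integer.parseInt(m.group(2)) - 1);
                cal.set(Calendar.DAY_OF_MONTH, Integer.parseInt(m.group(3)));
                System.out.println(cal.getTime()); // the instance time encoded in the path
            }
        }
    }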

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/lock/MemoryLocks.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/lock/MemoryLocks.java b/common/src/main/java/org/apache/falcon/entity/lock/MemoryLocks.java
deleted file mode 100644
index 0cf2722..0000000
--- a/common/src/main/java/org/apache/falcon/entity/lock/MemoryLocks.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.entity.lock;
-
-import org.apache.falcon.entity.v0.Entity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * In-memory resource locking for Falcon entities.
- */
-public final class MemoryLocks {
-    private static final Logger LOG = LoggerFactory.getLogger(MemoryLocks.class);
-    private static ConcurrentHashMap<String, Boolean> locks = new ConcurrentHashMap<String, Boolean>();
-
-    private static MemoryLocks instance = new MemoryLocks();
-
-    private MemoryLocks() {
-    }
-
-    public static MemoryLocks getInstance() {
-        return instance;
-    }
-
-    /**
-     * Obtain a lock for an entity.
-     *
-     * @param entity entity object.
-     * @return <code>true</code> if the lock was acquired, <code>false</code> if it is already held.
-     */
-    public boolean acquireLock(Entity entity, String command) {
-        boolean lockObtained = false;
-        String entityName = getLockKey(entity);
-
-        Boolean putResponse = locks.putIfAbsent(entityName, true);
-        if (putResponse == null || !putResponse) {
-            LOG.info("Lock acquired for {} on {} by {}",
-                    command, entity.toShortString(), Thread.currentThread().getName());
-            lockObtained = true;
-        }
-        return lockObtained;
-    }
-
-    /**
-     * Release the lock for an entity.
-     *
-     * @param entity entity object.
-     */
-    public void releaseLock(Entity entity) {
-        String entityName = getLockKey(entity);
-
-        locks.remove(entityName);
-        LOG.info("Successfully released lock on {} by {}",
-                entity.toShortString(), Thread.currentThread().getName());
-    }
-
-    private String getLockKey(Entity entity) {
-        return entity.getEntityType().toString() + "." + entity.getName();
-    }
-
-
-}
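
The locking above is a thin wrapper over ConcurrentHashMap.putIfAbsent. A standalone, JDK-only sketch of the same idiom, with a made-up key, showing why a second acquire fails until the first holder releases.

    import java.util.concurrent.ConcurrentHashMap;

    public final class PutIfAbsentLockDemo {
        private static final ConcurrentHashMap<String, Boolean> LOCKS = new ConcurrentHashMap<>();

        static boolean acquire(String key) {
            // putIfAbsent returns null only when no mapping existed, i.e. the lock was free.
            return LOCKS.putIfAbsent(key, Boolean.TRUE) == null;
        }

        static void release(String key) {
            LOCKS.remove(key);
        }

        public static void main(String[] args) {
            String key = "FEED.sample-feed"; // same "<type>.<name>" shape as getLockKey()
            System.out.println(acquire(key)); // true  - first caller gets the lock
            System.out.println(acquire(key)); // false - already held
            release(key);
            System.out.println(acquire(key)); // true  - free again after release
        }
    }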

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java b/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
deleted file mode 100644
index bef4b39..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.commons.lang.Validate;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.catalog.CatalogServiceFactory;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.ACL;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Location;
-import org.apache.falcon.entity.v0.cluster.Properties;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.security.SecurityUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.falcon.workflow.WorkflowEngineFactory;
-import org.apache.falcon.workflow.util.OozieConstants;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.jms.ConnectionFactory;
-import java.io.IOException;
-import java.net.URI;
-import java.util.HashSet;
-import java.util.List;
-
-/**
- * Parser that parses cluster entity definition.
- */
-public class ClusterEntityParser extends EntityParser<Cluster> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ClusterEntityParser.class);
-
-    public ClusterEntityParser() {
-        super(EntityType.CLUSTER);
-    }
-
-    @Override
-    public void validate(Cluster cluster) throws ValidationException {
-        // validating scheme in light of fail-early
-        validateScheme(cluster, Interfacetype.READONLY);
-        validateScheme(cluster, Interfacetype.WRITE);
-        validateScheme(cluster, Interfacetype.WORKFLOW);
-        // User may choose to disable job completion notifications
-        if (ClusterHelper.getInterface(cluster, Interfacetype.MESSAGING) != null) {
-            validateScheme(cluster, Interfacetype.MESSAGING);
-        }
-        if (CatalogServiceFactory.isEnabled()
-                && ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY) != null) {
-            validateScheme(cluster, Interfacetype.REGISTRY);
-        }
-
-        validateACL(cluster);
-
-        if (!EntityUtil.responsibleFor(cluster.getColo())) {
-            return;
-        }
-
-        validateReadInterface(cluster);
-        validateWriteInterface(cluster);
-        validateExecuteInterface(cluster);
-        validateWorkflowInterface(cluster);
-        validateMessagingInterface(cluster);
-        validateRegistryInterface(cluster);
-        validateLocations(cluster);
-        validateProperties(cluster);
-    }
-
-    private void validateScheme(Cluster cluster, Interfacetype interfacetype)
-        throws ValidationException {
-        final String endpoint = ClusterHelper.getInterface(cluster, interfacetype).getEndpoint();
-        URI uri = new Path(endpoint).toUri();
-        if (uri.getScheme() == null) {
-            if (Interfacetype.WORKFLOW == interfacetype
-                    && uri.toString().equals(OozieConstants.LOCAL_OOZIE)) {
-                return;
-            }
-            throw new ValidationException("Cannot get valid scheme for interface: "
-                    + interfacetype + " of cluster: " + cluster.getName());
-        }
-    }
-
-    private void validateReadInterface(Cluster cluster) throws ValidationException {
-        final String readOnlyStorageUrl = ClusterHelper.getReadOnlyStorageUrl(cluster);
-        LOG.info("Validating read interface: {}", readOnlyStorageUrl);
-
-        validateFileSystem(cluster, readOnlyStorageUrl);
-    }
-
-    private void validateWriteInterface(Cluster cluster) throws ValidationException {
-        final String writeStorageUrl = ClusterHelper.getStorageUrl(cluster);
-        LOG.info("Validating write interface: {}", writeStorageUrl);
-
-        validateFileSystem(cluster, writeStorageUrl);
-    }
-
-    private void validateFileSystem(Cluster cluster, String storageUrl) throws ValidationException {
-        try {
-            Configuration conf = new Configuration();
-            conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, storageUrl);
-            conf.setInt("ipc.client.connect.max.retries", 10);
-
-            if (UserGroupInformation.isSecurityEnabled()) {
-                String nameNodePrincipal = ClusterHelper.getPropertyValue(cluster, SecurityUtil.NN_PRINCIPAL);
-                Validate.notEmpty(nameNodePrincipal,
-                    "Cluster definition missing required namenode credential property: " + SecurityUtil.NN_PRINCIPAL);
-
-                conf.set(SecurityUtil.NN_PRINCIPAL, nameNodePrincipal);
-            }
-
-            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(conf);
-            fs.exists(new Path("/"));
-        } catch (Exception e) {
-            throw new ValidationException("Invalid storage server or port: " + storageUrl
-                    + ", " + e.getMessage(), e);
-        }
-    }
-
-    private void validateExecuteInterface(Cluster cluster) throws ValidationException {
-        String executeUrl = ClusterHelper.getMREndPoint(cluster);
-        LOG.info("Validating execute interface: {}", executeUrl);
-
-        try {
-            HadoopClientFactory.get().validateJobClient(executeUrl);
-        } catch (IOException e) {
-            throw new ValidationException("Invalid Execute server or port: " + executeUrl, e);
-        }
-    }
-
-    protected void validateWorkflowInterface(Cluster cluster) throws ValidationException {
-        final String workflowUrl = ClusterHelper.getOozieUrl(cluster);
-        LOG.info("Validating workflow interface: {}", workflowUrl);
-        if (OozieConstants.LOCAL_OOZIE.equals(workflowUrl)) {
-            return;
-        }
-        try {
-            if (!WorkflowEngineFactory.getWorkflowEngine().isAlive(cluster)) {
-                throw new ValidationException("Unable to reach Workflow server:" + workflowUrl);
-            }
-        } catch (FalconException e) {
-            throw new ValidationException("Invalid Workflow server or port: " + workflowUrl, e);
-        }
-    }
-
-    protected void validateMessagingInterface(Cluster cluster) throws ValidationException {
-        // Validate only if user has specified this
-        final Interface messagingInterface = ClusterHelper.getInterface(cluster, Interfacetype.MESSAGING);
-        if (messagingInterface == null) {
-            LOG.info("Messaging service is not enabled for cluster: {}", cluster.getName());
-            return;
-        }
-
-        final String messagingUrl = ClusterHelper.getMessageBrokerUrl(cluster);
-        final String implementation = StartupProperties.get().getProperty("broker.impl.class",
-                "org.apache.activemq.ActiveMQConnectionFactory");
-        LOG.info("Validating messaging interface: {}, implementation: {}", messagingUrl, implementation);
-
-        try {
-            @SuppressWarnings("unchecked")
-            Class<ConnectionFactory> clazz = (Class<ConnectionFactory>)
-                    getClass().getClassLoader().loadClass(implementation);
-            ConnectionFactory connectionFactory = clazz.getConstructor(
-                    String.class, String.class, String.class).newInstance("", "", messagingUrl);
-            connectionFactory.createConnection();
-        } catch (Exception e) {
-            throw new ValidationException("Invalid Messaging server or port: " + messagingUrl
-                    + " for: " + implementation, e);
-        }
-    }
-
-    protected void validateRegistryInterface(Cluster cluster) throws ValidationException {
-        final boolean isCatalogRegistryEnabled = CatalogServiceFactory.isEnabled();
-        if (!isCatalogRegistryEnabled) {
-            return;  // ignore the registry interface for backwards compatibility
-        }
-
-        // continue validation only if a catalog service is provided
-        final Interface catalogInterface = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY);
-        if (catalogInterface == null) {
-            LOG.info("Catalog service is not enabled for cluster: {}", cluster.getName());
-            return;
-        }
-
-        final String catalogUrl = catalogInterface.getEndpoint();
-        LOG.info("Validating catalog registry interface: {}", catalogUrl);
-
-        try {
-            Configuration clusterConf = ClusterHelper.getConfiguration(cluster);
-            if (UserGroupInformation.isSecurityEnabled()) {
-                String metaStorePrincipal = clusterConf.get(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
-                Validate.notEmpty(metaStorePrincipal,
-                        "Cluster definition missing required metastore credential property: "
-                                + SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
-            }
-
-            if (!CatalogServiceFactory.getCatalogService().isAlive(clusterConf, catalogUrl)) {
-                throw new ValidationException("Unable to reach Catalog server:" + catalogUrl);
-            }
-        } catch (FalconException e) {
-            throw new ValidationException("Invalid Catalog server or port: " + catalogUrl, e);
-        }
-    }
-
-    /**
-     * Validate ACL if authorization is enabled.
-     *
-     * @param cluster cluster entity
-     * @throws ValidationException
-     */
-    private void validateACL(Cluster cluster) throws ValidationException {
-        if (isAuthorizationDisabled) {
-            return;
-        }
-
-        // Validate that the entity owner is the logged-in, authenticated user when authorization is enabled
-        final ACL clusterACL = cluster.getACL();
-        if (clusterACL == null) {
-            throw new ValidationException("Cluster ACL cannot be empty for:  " + cluster.getName());
-        }
-
-        validateACLOwnerAndGroup(clusterACL);
-
-        try {
-            authorize(cluster.getName(), clusterACL);
-        } catch (AuthorizationException e) {
-            throw new ValidationException(e);
-        }
-    }
-
-    /**
-     * Validate the locations on the cluster exists with appropriate permissions
-     * for the user to write to this directory.
-     *
-     * @param cluster cluster entity
-     * @throws ValidationException
-     */
-    protected void validateLocations(Cluster cluster) throws ValidationException {
-        Configuration conf = ClusterHelper.getConfiguration(cluster);
-        FileSystem fs;
-        try {
-            fs = HadoopClientFactory.get().createFalconFileSystem(conf);
-        } catch (FalconException e) {
-            throw new ValidationException("Unable to get file system handle for cluster " + cluster.getName(), e);
-        }
-
-        Location stagingLocation = ClusterHelper.getLocation(cluster, ClusterLocationType.STAGING);
-        if (stagingLocation == null) {
-            throw new ValidationException(
-                    "Unable to find the mandatory location of name: " + ClusterLocationType.STAGING.value()
-                            + " for cluster " + cluster.getName());
-        } else {
-            checkPathOwnerAndPermission(cluster.getName(), stagingLocation.getPath(), fs,
-                    HadoopClientFactory.ALL_PERMISSION);
-            if (!ClusterHelper.checkWorkingLocationExists(cluster)) {
-                // Create the working location as a sub-directory of the staging dir with 755 permissions. FALCON-910
-                createWorkingDirUnderStaging(fs, cluster, stagingLocation);
-            } else {
-                Location workingLocation = ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING);
-                if (stagingLocation.getPath().equals(workingLocation.getPath())) {
-                    throw new ValidationException(
-                            "Location with name: " + stagingLocation.getName().value() + " and " + workingLocation
-                                    .getName().value() + " cannot have same path: " + stagingLocation.getPath()
-                                    + " for cluster :" + cluster.getName());
-                } else {
-                    checkPathOwnerAndPermission(cluster.getName(), workingLocation.getPath(), fs,
-                            HadoopClientFactory.READ_EXECUTE_PERMISSION);
-                }
-            }
-            // Create staging subdirs falcon/workflows/feed and falcon/workflows/process : Falcon-1647
-            createStagingSubdirs(fs, cluster, stagingLocation,
-                    "falcon/workflows/feed", HadoopClientFactory.ALL_PERMISSION);
-            createStagingSubdirs(fs, cluster, stagingLocation,
-                    "falcon/workflows/process", HadoopClientFactory.ALL_PERMISSION);
-        }
-    }
-
-    private void createWorkingDirUnderStaging(FileSystem fs, Cluster cluster,
-                                              Location stagingLocation) throws ValidationException {
-        Path workingDirPath = new Path(stagingLocation.getPath(), ClusterHelper.WORKINGDIR);
-        try {
-            if (!fs.exists(workingDirPath)) {  //Checking if the staging dir has the working dir to be created
-                HadoopClientFactory.mkdirs(fs, workingDirPath, HadoopClientFactory.READ_EXECUTE_PERMISSION);
-            } else {
-                if (fs.isDirectory(workingDirPath)) {
-                    FsPermission workingPerms = fs.getFileStatus(workingDirPath).getPermission();
-                    if (!workingPerms.equals(HadoopClientFactory.READ_EXECUTE_PERMISSION)) { //perms check
-                        throw new ValidationException(
-                                "Falcon needs subdir " + ClusterHelper.WORKINGDIR + " inside staging dir:"
-                                        + stagingLocation.getPath()
-                                        + " when staging location not specified with "
-                                        + HadoopClientFactory.READ_EXECUTE_PERMISSION.toString() + " got "
-                                        + workingPerms.toString());
-                    }
-                } else {
-                    throw new ValidationException(
-                            "Falcon needs subdir " + ClusterHelper.WORKINGDIR + " inside staging dir:"
-                                    + stagingLocation.getPath()
-                                    + " when staging location not specified. Got a file at " + workingDirPath
-                                    .toString());
-                }
-            }
-        } catch (IOException e) {
-            throw new ValidationException(
-                    "Unable to create path " + workingDirPath.toString()
-                            + " for cluster " + cluster.getName(), e);
-        }
-    }
-
-    private void createStagingSubdirs(FileSystem fs, Cluster cluster, Location stagingLocation,
-                                      String path, FsPermission permission) throws ValidationException {
-        Path subdirPath = new Path(stagingLocation.getPath(), path);
-        try {
-            HadoopClientFactory.mkdirs(fs, subdirPath, permission);
-        } catch (IOException e) {
-            throw new ValidationException(
-                    "Unable to create path "
-                            + subdirPath.toString() + " for cluster " + cluster.getName(), e);
-        }
-    }
-
-    protected void validateProperties(Cluster cluster) throws ValidationException {
-        Properties properties = cluster.getProperties();
-        if (properties == null) {
-            return; // Cluster has no properties to validate.
-        }
-
-        List<Property> propertyList = cluster.getProperties().getProperties();
-        HashSet<String> propertyKeys = new HashSet<String>();
-        for (Property prop : propertyList) {
-            if (StringUtils.isBlank(prop.getName())) {
-                throw new ValidationException("Property name cannot be empty for Cluster: "
-                        + cluster.getName());
-            }
-            if (!propertyKeys.add(prop.getName())) {
-                throw new ValidationException("Multiple properties with same name found for Cluster: "
-                        + cluster.getName());
-            }
-        }
-    }
-
-    private void checkPathOwnerAndPermission(String clusterName, String location, FileSystem fs,
-            FsPermission expectedPermission) throws ValidationException {
-
-        Path locationPath = new Path(location);
-        try {
-            if (!fs.exists(locationPath)) {
-                throw new ValidationException("Location " + location + " for cluster " + clusterName + " must exist.");
-            }
-
-            // falcon owns this path on each cluster
-            final String loginUser = UserGroupInformation.getLoginUser().getShortUserName();
-            FileStatus fileStatus = fs.getFileStatus(locationPath);
-            final String locationOwner = fileStatus.getOwner();
-            if (!locationOwner.equals(loginUser)) {
-                LOG.error("Owner of the location {} is {} for cluster {}. Current user {} is not the owner of the "
-                        + "location.", locationPath, locationOwner, clusterName, loginUser);
-                throw new ValidationException("Path [" + locationPath + "] on the cluster [" + clusterName + "] has "
-                        + "owner [" + locationOwner + "]. Current user [" + loginUser + "] is not the owner of the "
-                        + "path");
-            }
-            String errorMessage = "Path " + locationPath + " has permissions: " + fileStatus.getPermission().toString()
-                    + ", should be " + expectedPermission;
-            if (fileStatus.getPermission().toShort() != expectedPermission.toShort()) {
-                LOG.error(errorMessage);
-                throw new ValidationException(errorMessage);
-            }
-            // try to list to see if the user is able to write to this folder
-            fs.listStatus(locationPath);
-        } catch (IOException e) {
-            throw new ValidationException(
-                    "Unable to validate the location with path: " + location + " for cluster:" + clusterName
-                            + " due to transient failures ", e);
-        }
-    }
-}
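
checkPathOwnerAndPermission above reduces to two comparisons: the path owner against the login user, and the path's FsPermission bits against an expected mask. A hedged sketch of that check against a plain Hadoop FileSystem; the path and expected permission are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class PathOwnerPermissionCheck {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            Path staging = new Path("/apps/falcon/staging");        // illustrative location
            FsPermission expected = new FsPermission((short) 0777); // e.g. ALL_PERMISSION

            FileStatus status = fs.getFileStatus(staging);
            String loginUser = UserGroupInformation.getLoginUser().getShortUserName();

            // Same two checks the parser performs: owner must be the falcon login user,
            // and the permission bits must match the expected mask exactly.
            boolean ownerOk = status.getOwner().equals(loginUser);
            boolean permsOk = status.getPermission().toShort() == expected.toShort();
            System.out.println("owner ok: " + ownerOk + ", perms ok: " + permsOk);
        }
    }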

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/CrossEntityValidations.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/CrossEntityValidations.java b/common/src/main/java/org/apache/falcon/entity/parser/CrossEntityValidations.java
deleted file mode 100644
index 18ae754..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/CrossEntityValidations.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.ProcessHelper;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.entity.v0.process.Validity;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.util.DateUtil;
-
-import java.util.Date;
-
-/**
- * Validation helper functions to validate across process, feed and cluster definitions.
- */
-public final class CrossEntityValidations {
-
-    private CrossEntityValidations() {}
-
-    public static void validateInstanceRange(Process process, Input input, Feed feed) throws FalconException {
-
-        try {
-            for (Cluster cluster : process.getClusters().getClusters()) {
-                String clusterName = cluster.getName();
-                org.apache.falcon.entity.v0.feed.Validity feedValidity = FeedHelper.getCluster(feed,
-                        clusterName).getValidity();
-
-                // Optional end date
-                if (feedValidity.getEnd() == null) {
-                    feedValidity.setEnd(DateUtil.NEVER);
-                }
-
-                Date feedStart = feedValidity.getStart();
-                Date feedEnd = feedValidity.getEnd();
-
-                String instStartEL = input.getStart();
-                String instEndEL = input.getEnd();
-                ExpressionHelper evaluator = ExpressionHelper.get();
-
-                Validity processValidity = ProcessHelper.getCluster(process, clusterName).getValidity();
-                ExpressionHelper.setReferenceDate(processValidity.getStart());
-                Date instStart = evaluator.evaluate(instStartEL, Date.class);
-                Date instEnd = evaluator.evaluate(instEndEL, Date.class);
-                if (instStart.before(feedStart)) {
-                    throw new ValidationException("Start instance " + instStartEL + " of feed " + feed.getName()
-                            + " is before the start of feed " + feedValidity.getStart() + " for cluster "
-                            + clusterName);
-                }
-
-                if (instEnd.before(instStart)) {
-                    throw new ValidationException("End instance " + instEndEL + " for feed " + feed.getName()
-                            + " is before the start instance " + instStartEL + " for cluster " + clusterName);
-                }
-
-                if (instEnd.after(feedEnd)) {
-                    throw new ValidationException("End instance " + instEndEL + " for feed " + feed.getName()
-                            + " is after the end of feed " + feedValidity.getEnd() + " for cluster " + clusterName);
-                }
-            }
-        } catch (ValidationException e) {
-            throw e;
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    public static void validateFeedRetentionPeriod(String startInstance, Feed feed, String clusterName)
-        throws FalconException {
-
-        String feedRetention = FeedHelper.getCluster(feed, clusterName).getRetention().getLimit().toString();
-        ExpressionHelper evaluator = ExpressionHelper.get();
-
-        Date now = new Date();
-        ExpressionHelper.setReferenceDate(now);
-        Date instStart = evaluator.evaluate(startInstance, Date.class);
-        long feedDuration = evaluator.evaluate(feedRetention, Long.class);
-        Date feedStart = new Date(now.getTime() - feedDuration);
-
-        if (instStart.before(feedStart)) {
-            throw new ValidationException("Start instance: " + startInstance + " of process is out of range for feed: "
-                    + feed.getName() + " in cluster: " + clusterName + "'s retention limit: " + feedRetention);
-        }
-    }
-
-    // Mapping to oozie coord's dataset fields
-    public static void validateInstance(Process process, Output output, Feed feed) throws FalconException {
-
-        try {
-            for (Cluster cluster : process.getClusters().getClusters()) {
-                String clusterName = cluster.getName();
-                org.apache.falcon.entity.v0.feed.Validity feedValidity = FeedHelper.getCluster(feed,
-                        clusterName).getValidity();
-                Date feedStart = feedValidity.getStart();
-                Date feedEnd = feedValidity.getEnd();
-
-                String instEL = output.getInstance();
-                ExpressionHelper evaluator = ExpressionHelper.get();
-                Validity processValidity = ProcessHelper.getCluster(process, clusterName).getValidity();
-                ExpressionHelper.setReferenceDate(processValidity.getStart());
-                Date inst = evaluator.evaluate(instEL, Date.class);
-                if (inst.before(feedStart)) {
-                    throw new ValidationException("Instance  " + instEL + " of feed " + feed.getName()
-                            + " is before the start of feed " + feedValidity.getStart() + " for cluster " + clusterName);
-                }
-
-                if (inst.after(feedEnd)) {
-                    throw new ValidationException("End instance " + instEL + " for feed " + feed.getName()
-                            + " is after the end of feed " + feedValidity.getEnd() + " for cluster " + clusterName);
-                }
-            }
-        } catch (ValidationException e) {
-            throw e;
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    public static void validateInputPartition(Input input, Feed feed) throws ValidationException {
-        String[] parts = input.getPartition().split("/");
-        if (feed.getPartitions() == null || feed.getPartitions().getPartitions().isEmpty()
-                || feed.getPartitions().getPartitions().size() < parts.length) {
-            throw new ValidationException("Partition specification in input " + input.getName() + " is wrong");
-        }
-    }
-
-    public static void validateFeedDefinedForCluster(Feed feed, String clusterName) throws FalconException {
-        if (FeedHelper.getCluster(feed, clusterName) == null) {
-            throw new ValidationException("Feed " + feed.getName() + " is not defined for cluster " + clusterName);
-        }
-    }
-}

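The retention check in validateFeedRetentionPeriod above reduces to simple date arithmetic: the start instance must fall inside the window [now - retentionLimit, now]. A minimal, self-contained sketch of that rule, using plain java.util.Date and a made-up seven-day limit in place of the evaluated retention expression:

    import java.util.Date;
    import java.util.concurrent.TimeUnit;

    public class RetentionWindowSketch {
        public static void main(String[] args) {
            Date now = new Date();
            // Stand-in for the evaluated retention limit (e.g. "days(7)").
            long retentionMillis = TimeUnit.DAYS.toMillis(7);
            Date windowStart = new Date(now.getTime() - retentionMillis);
            // Hypothetical start instance, three days in the past.
            Date instStart = new Date(now.getTime() - TimeUnit.DAYS.toMillis(3));
            if (instStart.before(windowStart)) {
                throw new IllegalStateException("Start instance is outside the feed's retention limit");
            }
            System.out.println("Start instance falls inside the retention window");
        }
    }
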
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/DatasourceEntityParser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/DatasourceEntityParser.java b/common/src/main/java/org/apache/falcon/entity/parser/DatasourceEntityParser.java
deleted file mode 100644
index 998f952..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/DatasourceEntityParser.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.DatasourceHelper;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.datasource.ACL;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-import org.apache.falcon.entity.v0.datasource.Interfacetype;
-import org.apache.falcon.util.HdfsClassLoader;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.util.Arrays;
-import java.util.Properties;
-
-/**
- * Parser for DataSource entity definition.
- */
-
-public class DatasourceEntityParser extends EntityParser<Datasource> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DatasourceEntityParser.class);
-
-    public DatasourceEntityParser() {
-        super(EntityType.DATASOURCE);
-    }
-
-    @Override
-    public void validate(Datasource db) throws FalconException {
-        try {
-            ClassLoader hdfsClassLoader = HdfsClassLoader.load(db.getName(), db.getDriver().getJars());
-            validateInterface(db, Interfacetype.READONLY, hdfsClassLoader);
-            validateInterface(db, Interfacetype.WRITE, hdfsClassLoader);
-            validateACL(db);
-        } catch(IOException io) {
-            throw new ValidationException("Unable to copy driver jars to local dir: "
-                    + Arrays.toString(db.getDriver().getJars().toArray()));
-        }
-    }
-
-    private static void validateInterface(Datasource db, Interfacetype interfacetype, ClassLoader hdfsClassLoader)
-        throws ValidationException {
-        String endpoint = null;
-        Properties userPasswdInfo = null;
-        try {
-            if (interfacetype == Interfacetype.READONLY) {
-                endpoint = DatasourceHelper.getReadOnlyEndpoint(db);
-                userPasswdInfo = DatasourceHelper.fetchReadPasswordInfo(db);
-            } else if (interfacetype == Interfacetype.WRITE) {
-                endpoint = DatasourceHelper.getWriteEndpoint(db);
-                userPasswdInfo = DatasourceHelper.fetchWritePasswordInfo(db);
-            }
-            if (StringUtils.isNotBlank(endpoint)) {
-                LOG.info("Validating {} endpoint {} connection.", interfacetype.value(), endpoint);
-                validateConnection(hdfsClassLoader, db.getDriver().getClazz(), endpoint, userPasswdInfo);
-            }
-        } catch(FalconException fe) {
-            throw new ValidationException(String.format("Cannot validate '%s' "
-                            + "interface '%s' " + "of database entity '%s' due to '%s' ",
-                   interfacetype, endpoint,
-                   db.getName(), fe.getMessage()));
-        }
-    }
-
-    private static void validateConnection(ClassLoader hdfsClassLoader, String driverClass,
-                                    String connectUrl, Properties userPasswdInfo)
-        throws FalconException {
-        ClassLoader previousClassLoader = Thread.currentThread().getContextClassLoader();
-        LOG.info("Preserving current classloader: {}", previousClassLoader.toString());
-        try {
-            Thread.currentThread().setContextClassLoader(hdfsClassLoader);
-            LOG.info("Setting context classloader to : {}", hdfsClassLoader.toString());
-            java.sql.Driver driver = (java.sql.Driver) hdfsClassLoader.loadClass(driverClass).newInstance();
-            LOG.info("Validating connection URL: {} using driver: {}", connectUrl, driver.getClass().toString());
-            Connection con = driver.connect(connectUrl, userPasswdInfo);
-            if (con == null) {
-                throw new FalconException("DriverManager.getConnection() return "
-                       + "null for URL : " + connectUrl);
-            }
-        } catch (Exception ex) {
-            LOG.error("Exception while validating connection : ", ex);
-            throw new FalconException(ex);
-        } finally {
-            Thread.currentThread().setContextClassLoader(previousClassLoader);
-            LOG.info("Restoring original classloader {}", previousClassLoader.toString());
-        }
-    }
-
-    /**
-     * Validate ACL if authorization is enabled.
-     *
-     * @param  db database entity
-     * @throws ValidationException
-     */
-    private void validateACL(Datasource db) throws ValidationException {
-        if (isAuthorizationDisabled) {
-            return;
-        }
-
-        // Validate the entity owner is logged-in, authenticated user if authorization is enabled
-        final ACL dbACL = db.getACL();
-        if (dbACL == null) {
-            throw new ValidationException("Datasource ACL cannot be empty for:  " + db.getName());
-        }
-
-        validateACLOwnerAndGroup(dbACL);
-
-        try {
-            authorize(db.getName(), dbACL);
-        } catch (AuthorizationException e) {
-            throw new ValidationException(e);
-        }
-    }
-}

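The connection probe in validateConnection above is plain JDBC: load the driver class, call Driver.connect() with the endpoint and credentials, and fail if the result is null. A stripped-down sketch of the same probe, with a hypothetical driver class, URL and credentials in place of the values Falcon reads from the datasource entity and its jar-backed classloader:

    import java.sql.Connection;
    import java.sql.Driver;
    import java.util.Properties;

    public class ConnectionProbeSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical driver class and endpoint.
            String driverClass = "org.postgresql.Driver";
            String connectUrl = "jdbc:postgresql://db.example.com:5432/falcon";
            Properties credentials = new Properties();
            credentials.setProperty("user", "falcon");
            credentials.setProperty("password", "secret");

            Driver driver = (Driver) Class.forName(driverClass).newInstance();
            try (Connection con = driver.connect(connectUrl, credentials)) {
                if (con == null) {
                    throw new IllegalStateException("Driver returned a null connection for URL: " + connectUrl);
                }
                System.out.println("Connection validated for " + connectUrl);
            }
        }
    }
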
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/EntityParser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/EntityParser.java b/common/src/main/java/org/apache/falcon/entity/parser/EntityParser.java
deleted file mode 100644
index 05b204d..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/EntityParser.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.xml.bind.Unmarshaller;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Generic abstract entity parser; the concrete FEED, PROCESS and CLUSTER parsers extend this class
- * to implement type-specific parsing.
- *
- * @param <T> of type Entity
- */
-public abstract class EntityParser<T extends Entity> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(EntityParser.class);
-
-    private final EntityType entityType;
-    protected final boolean isAuthorizationDisabled;
-
-    protected EntityParser(EntityType entityType) {
-        this.entityType = entityType;
-        isAuthorizationDisabled = !SecurityUtil.isAuthorizationEnabled();
-    }
-
-    public EntityType getEntityType() {
-        return this.entityType;
-    }
-
-    /**
-     * Parses a sent XML and validates it using JAXB.
-     *
-     * @param xmlString - Entity XML
-     * @return Entity - JAVA Object
-     * @throws FalconException
-     */
-    public Entity parseAndValidate(String xmlString) throws FalconException {
-        InputStream inputStream = null;
-        try {
-            inputStream = new ByteArrayInputStream(xmlString.getBytes());
-            return parseAndValidate(inputStream);
-        } finally {
-            IOUtils.closeQuietly(inputStream);
-        }
-    }
-
-    /**
-     * Parses xml stream.
-     *
-     * @param xmlStream stream
-     * @return entity
-     * @throws FalconException
-     */
-    @SuppressWarnings("unchecked")
-    public T parse(InputStream xmlStream) throws FalconException {
-        try {
-            // parse against schema
-            Unmarshaller unmarshaller = entityType.getUnmarshaller();
-            T entity = (T) unmarshaller.unmarshal(xmlStream);
-            LOG.info("Parsed Entity: {}", entity.getName());
-            return entity;
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    public T parseAndValidate(InputStream xmlStream) throws FalconException {
-        T entity = parse(xmlStream);
-        validate(entity);
-        return entity;
-    }
-
-    protected void validateEntityExists(EntityType type, String name) throws FalconException {
-        if (ConfigurationStore.get().get(type, name) == null) {
-            throw new ValidationException("Referenced " + type + " " + name + " is not registered");
-        }
-    }
-
-    public abstract void validate(T entity) throws FalconException;
-
-    /**
-     * Checks if the acl owner is a valid user by fetching the groups for the owner.
-     * Also checks if the acl group is one of the fetched groups for membership.
-     * The only limitation is that a user cannot add a group in ACL that he does not belong to.
-     *
-     * @param acl  entity ACL
-     * @throws org.apache.falcon.entity.parser.ValidationException
-     */
-    protected void validateACLOwnerAndGroup(AccessControlList acl) throws ValidationException {
-        String aclOwner = acl.getOwner();
-        String aclGroup = acl.getGroup();
-
-        try {
-            UserGroupInformation proxyACLUser = UserGroupInformation.createProxyUser(
-                    aclOwner, UserGroupInformation.getLoginUser());
-            Set<String> groups = new HashSet<String>(Arrays.asList(proxyACLUser.getGroupNames()));
-            if (!groups.contains(aclGroup)) {
-                throw new AuthorizationException("Invalid group: " + aclGroup
-                        + " for user: " + aclOwner);
-            }
-        } catch (IOException e) {
-            throw new ValidationException("Invalid acl owner " + aclOwner
-                    + ", does not exist or does not belong to group: " + aclGroup);
-        }
-    }
-
-    /**
-     * Validate if the entity owner is the logged-in authenticated user.
-     *
-     * @param entityName  entity name
-     * @param acl         entity ACL
-     * @throws AuthorizationException
-     */
-    protected void authorize(String entityName,
-                             AccessControlList acl) throws AuthorizationException {
-        try {
-            SecurityUtil.getAuthorizationProvider().authorizeEntity(entityName,
-                    getEntityType().name(), acl, "submit", CurrentUser.getAuthenticatedUGI());
-        } catch (FalconException e) {
-            throw new AuthorizationException(e);
-        } catch (IOException e) {
-            throw new AuthorizationException(e);
-        }
-    }
-}

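The ACL check in validateACLOwnerAndGroup above resolves the owner's groups through Hadoop's UserGroupInformation and requires the ACL group to be among them. A minimal sketch of that membership test, with made-up owner and group names:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.security.UserGroupInformation;

    public class AclGroupCheckSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical ACL values; the parser reads them from the entity's <ACL> element.
            String aclOwner = "feed-owner";
            String aclGroup = "analytics";

            UserGroupInformation proxyAclUser = UserGroupInformation.createProxyUser(
                    aclOwner, UserGroupInformation.getLoginUser());
            Set<String> groups = new HashSet<>(Arrays.asList(proxyAclUser.getGroupNames()));

            if (!groups.contains(aclGroup)) {
                System.err.println("Invalid group " + aclGroup + " for user " + aclOwner);
            } else {
                System.out.println("ACL owner " + aclOwner + " belongs to group " + aclGroup);
            }
        }
    }
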
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/EntityParserFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/EntityParserFactory.java b/common/src/main/java/org/apache/falcon/entity/parser/EntityParserFactory.java
deleted file mode 100644
index b497770..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/EntityParserFactory.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.entity.v0.EntityType;
-
-/**
- * Factory Class which returns the Parser based on the EntityType.
- */
-public final class EntityParserFactory {
-
-    private EntityParserFactory() {
-    }
-
-    /**
-     * Tie EntityType with the Entity Class in one place so that it can be
-     * unmarshalled easily by concrete classes based on the class type using
-     * JAXB.
-     *
-     * @param entityType - entity type
-     * @return concrete parser based on entity type
-     */
-    public static EntityParser getParser(final EntityType entityType) {
-
-        switch (entityType) {
-        case PROCESS:
-            return new ProcessEntityParser();
-        case FEED:
-            return new FeedEntityParser();
-        case CLUSTER:
-            return new ClusterEntityParser();
-        case DATASOURCE:
-            return new DatasourceEntityParser();
-        default:
-            throw new IllegalArgumentException("Unhandled entity type: " + entityType);
-        }
-    }
-
-}

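Callers obtain a type-specific parser from the factory above and then run parseAndValidate on the entity XML. An illustrative usage sketch; "/feed.xml" is a hypothetical classpath resource holding a feed definition:

    import java.io.InputStream;

    import org.apache.falcon.entity.parser.EntityParser;
    import org.apache.falcon.entity.parser.EntityParserFactory;
    import org.apache.falcon.entity.v0.Entity;
    import org.apache.falcon.entity.v0.EntityType;

    public class ParserUsageSketch {
        public static void main(String[] args) throws Exception {
            try (InputStream xml = ParserUsageSketch.class.getResourceAsStream("/feed.xml")) {
                EntityParser<?> parser = EntityParserFactory.getParser(EntityType.FEED);
                Entity feed = parser.parseAndValidate(xml);
                System.out.println("Parsed and validated entity: " + feed.getName());
            }
        }
    }
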

[36/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java b/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
deleted file mode 100644
index 887164e..0000000
--- a/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
+++ /dev/null
@@ -1,335 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang.Validate;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.EntityNotRegisteredException;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Default implementation of AuthorizationProvider in Falcon.
- *
- * The authorization is enforced in the following way:
- *
- * if admin resource,
- *      if authenticated user name matches the admin users configuration
- *      Else if groups of the authenticated user matches the admin groups configuration
- * Else if entities or instance resource
- *      if the authenticated user matches the owner in ACL for the entity
- *      Else if the groups of the authenticated user matches the group in ACL for the entity
- * Else if lineage resource
- *      All have read-only permissions
- * Else bad resource
- */
-public class DefaultAuthorizationProvider implements AuthorizationProvider {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DefaultAuthorizationProvider.class);
-
-    private static final Set<String> RESOURCES = new HashSet<String>(
-            Arrays.asList(new String[]{"admin", "entities", "instance", "metadata", }));
-
-    /**
-     * Constant for the configuration property that indicates the prefix.
-     */
-    protected static final String FALCON_PREFIX = "falcon.security.authorization.";
-
-    /**
-     * Constants for the configuration properties that list the admin users and groups for falcon.
-     */
-    private static final String ADMIN_USERS_KEY = FALCON_PREFIX + "admin.users";
-    private static final String ADMIN_GROUPS_KEY = FALCON_PREFIX + "admin.groups";
-
-    /**
-     * The super-user is the user with the same identity as falcon process itself.
-     * Loosely, if you started falcon, then you are the super-user.
-     */
-    protected static final String SUPER_USER = System.getProperty("user.name");
-
-    /**
-     * Constant for the configuration property that indicates the super user group.
-     */
-    private static final String SUPER_USER_GROUP_KEY = FALCON_PREFIX + "superusergroup";
-
-    /**
-     * Super user group.
-     */
-    private final String superUserGroup;
-    private final Set<String> adminUsers;
-    private final Set<String> adminGroups;
-
-    public DefaultAuthorizationProvider() {
-        superUserGroup = StartupProperties.get().getProperty(SUPER_USER_GROUP_KEY);
-        adminUsers = getAdminNamesFromConfig(ADMIN_USERS_KEY);
-        adminGroups = getAdminNamesFromConfig(ADMIN_GROUPS_KEY);
-    }
-
-    private Set<String> getAdminNamesFromConfig(String key) {
-        Set<String> adminNames = new HashSet<String>();
-        String adminNamesConfig = StartupProperties.get().getProperty(key);
-        if (!StringUtils.isEmpty(adminNamesConfig)) {
-            adminNames.addAll(Arrays.asList(adminNamesConfig.split(",")));
-        }
-
-        return Collections.unmodifiableSet(adminNames);
-    }
-
-    /**
-     * Determines if the authenticated user is the user who started this process
-     * or belongs to the super user group.
-     *
-     * @param authenticatedUGI UGI
-     * @return true if super user else false.
-     */
-    public boolean isSuperUser(UserGroupInformation authenticatedUGI) {
-        return SUPER_USER.equals(authenticatedUGI.getShortUserName())
-            || (!StringUtils.isEmpty(superUserGroup)
-                    && isUserInGroup(superUserGroup, authenticatedUGI));
-    }
-
-    /**
-     * Checks if authenticated user should proxy the entity acl owner.
-     *
-     * @param authenticatedUGI  proxy ugi for the authenticated user.
-     * @param aclOwner          entity ACL Owner.
-     * @param aclGroup          entity ACL group.
-     * @throws IOException
-     */
-    @Override
-    public boolean shouldProxy(UserGroupInformation authenticatedUGI,
-                               final String aclOwner,
-                               final String aclGroup) throws IOException {
-        Validate.notNull(authenticatedUGI, "User cannot be empty or null");
-        Validate.notEmpty(aclOwner, "User cannot be empty or null");
-        Validate.notEmpty(aclGroup, "Group cannot be empty or null");
-
-        return isSuperUser(authenticatedUGI)
-            || (!isUserACLOwner(authenticatedUGI.getShortUserName(), aclOwner)
-                    && isUserInGroup(aclGroup, authenticatedUGI));
-    }
-
-    /**
-     * Determines if the authenticated user is authorized to execute the action on the resource.
-     * Throws an exception if not authorized.
-     *
-     * @param resource   api resource, admin, entities or instance
-     * @param action     action being authorized on resource and entity if applicable
-     * @param entityType entity type in question, not for admin resource
-     * @param entityName entity name in question, not for admin resource
-     * @param authenticatedUGI   proxy ugi for the authenticated user
-     * @throws org.apache.hadoop.security.authorize.AuthorizationException
-     */
-    @Override
-    public void authorizeResource(String resource, String action,
-                                  String entityType, String entityName,
-                                  UserGroupInformation authenticatedUGI)
-        throws AuthorizationException, EntityNotRegisteredException {
-
-        Validate.notEmpty(resource, "Resource cannot be empty or null");
-        Validate.isTrue(RESOURCES.contains(resource), "Illegal resource: " + resource);
-        Validate.notEmpty(action, "Action cannot be empty or null");
-
-        try {
-            if (isSuperUser(authenticatedUGI)) {
-                return;
-            }
-
-            if ("admin".equals(resource)) {
-                if (!("version".equals(action) || "clearuser".equals(action) || "getuser".equals(action))) {
-                    authorizeAdminResource(authenticatedUGI, action);
-                }
-            } else if ("entities".equals(resource) || "instance".equals(resource)) {
-                authorizeEntityResource(authenticatedUGI, entityName, entityType, action);
-            } else if ("metadata".equals(resource)) {
-                authorizeMetadataResource(authenticatedUGI, action);
-            }
-        } catch (IOException e) {
-            throw new AuthorizationException(e);
-        }
-    }
-
-    protected Set<String> getGroupNames(UserGroupInformation proxyUgi) {
-        return new HashSet<String>(Arrays.asList(proxyUgi.getGroupNames()));
-    }
-
-    /**
-     * Determines if the authenticated user is authorized to execute the action on the entity.
-     * Throws an exception if not authorized.
-     *
-     * @param entityName entity in question, applicable for entities and instance resource
-     * @param entityType entity in question, applicable for entities and instance resource
-     * @param acl        entity ACL
-     * @param action     action being authorized on resource and entity if applicable
-     * @param authenticatedUGI   proxy ugi for the authenticated user
-     * @throws org.apache.hadoop.security.authorize.AuthorizationException
-     */
-    @Override
-    public void authorizeEntity(String entityName, String entityType, AccessControlList acl,
-                                String action, UserGroupInformation authenticatedUGI)
-        throws AuthorizationException {
-
-        try {
-            LOG.info("Authorizing authenticatedUser={}, action={}, entity={}, type{}",
-                    authenticatedUGI.getShortUserName(), action, entityName, entityType);
-
-            if (isSuperUser(authenticatedUGI)) {
-                return;
-            }
-
-            checkUser(entityName, acl.getOwner(), acl.getGroup(), action, authenticatedUGI);
-        } catch (IOException e) {
-            throw new AuthorizationException(e);
-        }
-    }
-
-    /**
-     * Validate if the entity owner is the logged-in authenticated user.
-     *
-     * @param entityName        entity name.
-     * @param aclOwner          entity ACL Owner.
-     * @param aclGroup          entity ACL group.
-     * @param action            action being authorized on resource and entity if applicable.
-     * @param authenticatedUGI          proxy ugi for the authenticated user.
-     * @throws AuthorizationException
-     */
-    protected void checkUser(String entityName, String aclOwner, String aclGroup, String action,
-                             UserGroupInformation authenticatedUGI) throws AuthorizationException {
-        final String authenticatedUser = authenticatedUGI.getShortUserName();
-        if (isUserACLOwner(authenticatedUser, aclOwner)
-                || isUserInGroup(aclGroup, authenticatedUGI)) {
-            return;
-        }
-
-        StringBuilder message = new StringBuilder("Permission denied: authenticatedUser=");
-        message.append(authenticatedUser);
-        message.append(!authenticatedUser.equals(aclOwner)
-                ? " not entity owner=" + aclOwner
-                : " not in group=" + aclGroup);
-        message.append(", entity=").append(entityName).append(", action=").append(action);
-
-        LOG.error(message.toString());
-        throw new AuthorizationException(message.toString());
-    }
-
-    /**
-     * Determines if the authenticated user is the entity ACL owner.
-     *
-     * @param authenticatedUser authenticated user
-     * @param aclOwner          entity ACL owner
-     * @return true if authenticated user is the entity acl owner, false otherwise.
-     */
-    protected boolean isUserACLOwner(String authenticatedUser, String aclOwner) {
-        return authenticatedUser.equals(aclOwner);
-    }
-
-    /**
-     * Checks if the user's group matches the entity ACL group.
-     *
-     * @param group    Entity ACL group.
-     * @param proxyUgi proxy ugi for the authenticated user.
-     * @return true if user groups contains entity acl group.
-     */
-    protected boolean isUserInGroup(String group, UserGroupInformation proxyUgi) {
-        Set<String> groups = getGroupNames(proxyUgi);
-        return groups.contains(group);
-    }
-
-    /**
-     * Check if the user has admin privileges.
-     *
-     * @param authenticatedUGI proxy ugi for the authenticated user.
-     * @param action   admin action on the resource.
-     * @throws AuthorizationException if the user does not have admin privileges.
-     */
-    protected void authorizeAdminResource(UserGroupInformation authenticatedUGI,
-                                          String action) throws AuthorizationException {
-        final String authenticatedUser = authenticatedUGI.getShortUserName();
-        LOG.debug("Authorizing user={} for admin, action={}", authenticatedUser, action);
-        if (adminUsers.contains(authenticatedUser) || isUserInAdminGroups(authenticatedUGI)) {
-            return;
-        }
-
-        LOG.error("Permission denied: user {} does not have admin privilege for action={}",
-                authenticatedUser, action);
-        throw new AuthorizationException("Permission denied: user=" + authenticatedUser
-                + " does not have admin privilege for action=" + action);
-    }
-
-    protected boolean isUserInAdminGroups(UserGroupInformation proxyUgi) {
-        final Set<String> groups = getGroupNames(proxyUgi);
-        groups.retainAll(adminGroups);
-        return !groups.isEmpty();
-    }
-
-    protected void authorizeEntityResource(UserGroupInformation authenticatedUGI,
-                                           String entityName, String entityType,
-                                           String action)
-        throws AuthorizationException, EntityNotRegisteredException {
-
-        Validate.notEmpty(entityType, "Entity type cannot be empty or null");
-        LOG.debug("Authorizing authenticatedUser={} against entity/instance action={}, "
-                + "entity name={}, entity type={}",
-                authenticatedUGI.getShortUserName(), action, entityName, entityType);
-
-        if (entityName != null) { // lifecycle actions
-            Entity entity = getEntity(entityName, entityType);
-            authorizeEntity(entity.getName(), entity.getEntityType().name(),
-                entity.getACL(), action, authenticatedUGI);
-        } else {
-            // non lifecycle actions, lifecycle actions with null entity will validate later
-            LOG.info("Authorization for action={} will be done in the API", action);
-        }
-    }
-
-    private Entity getEntity(String entityName, String entityType)
-        throws EntityNotRegisteredException, AuthorizationException {
-
-        try {
-            EntityType type = EntityType.getEnum(entityType);
-            return EntityUtil.getEntity(type, entityName);
-        } catch (FalconException e) {
-            if (e instanceof EntityNotRegisteredException) {
-                throw (EntityNotRegisteredException) e;
-            } else {
-                throw new AuthorizationException(e);
-            }
-        }
-    }
-
-    protected void authorizeMetadataResource(UserGroupInformation authenticatedUGI,
-                                             String action) throws AuthorizationException {
-        LOG.debug("User {} authorized for action {} ", authenticatedUGI.getShortUserName(), action);
-        // todo - read-only for all metadata but needs to be implemented
-    }
-}

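The admin checks in the class above are driven entirely by startup.properties: the comma-separated admin user and group lists and the optional super-user group. A representative configuration (user and group names are illustrative):

    *.falcon.security.authorization.admin.users=falcon,ambari-qa
    *.falcon.security.authorization.admin.groups=falcon,staff
    *.falcon.security.authorization.superusergroup=hadoop
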
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/SecurityUtil.java b/common/src/main/java/org/apache/falcon/security/SecurityUtil.java
deleted file mode 100644
index c187358..0000000
--- a/common/src/main/java/org/apache/falcon/security/SecurityUtil.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
-/**
- * Security Util - bunch of security related helper methods.
- */
-public final class SecurityUtil {
-
-    /**
-     * Constant for the configuration property that indicates the prefix.
-     */
-    private static final String CONFIG_PREFIX = "falcon.authentication.";
-
-    /**
-     * Constant for the configuration property that indicates the authentication type.
-     */
-    public static final String AUTHENTICATION_TYPE = CONFIG_PREFIX + "type";
-
-    /**
-     * Constant for the configuration property that indicates the Name node principal.
-     */
-    public static final String NN_PRINCIPAL = "dfs.namenode.kerberos.principal";
-
-    /**
-     * Constant for the configuration property that indicates the Name node principal.
-     * This is used to talk to Hive Meta Store during parsing and validations only.
-     */
-    public static final String HIVE_METASTORE_KERBEROS_PRINCIPAL = "hive.metastore.kerberos.principal";
-
-    public static final String METASTORE_USE_THRIFT_SASL = "hive.metastore.sasl.enabled";
-
-    public static final String METASTORE_PRINCIPAL = "hcat.metastore.principal";
-
-    private static final Logger LOG = LoggerFactory.getLogger(SecurityUtil.class);
-
-    private SecurityUtil() {
-    }
-
-    public static String getAuthenticationType() {
-        return StartupProperties.get().getProperty(
-                AUTHENTICATION_TYPE, PseudoAuthenticationHandler.TYPE);
-    }
-
-    /**
-     * Checks if kerberos authentication is enabled in the configuration.
-     *
-     * @return true if falcon.authentication.type is kerberos, false otherwise
-     */
-    public static boolean isSecurityEnabled() {
-        String authenticationType = StartupProperties.get().getProperty(
-                AUTHENTICATION_TYPE, PseudoAuthenticationHandler.TYPE);
-
-        final boolean useKerberos;
-        if (authenticationType == null || PseudoAuthenticationHandler.TYPE.equals(authenticationType)) {
-            useKerberos = false;
-        } else if (KerberosAuthenticationHandler.TYPE.equals(authenticationType)) {
-            useKerberos = true;
-        } else {
-            throw new IllegalArgumentException("Invalid attribute value for "
-                    + AUTHENTICATION_TYPE + " of " + authenticationType);
-        }
-
-        return useKerberos;
-    }
-
-    public static String getLocalHostName() throws UnknownHostException {
-        return InetAddress.getLocalHost().getCanonicalHostName();
-    }
-
-    /**
-     * Checks if authorization is enabled in the configuration.
-     *
-     * @return true if falcon.security.authorization.enabled is enabled, false otherwise
-     */
-    public static boolean isAuthorizationEnabled() {
-        return Boolean.valueOf(StartupProperties.get().getProperty(
-                "falcon.security.authorization.enabled", "false"));
-    }
-
-    public static AuthorizationProvider getAuthorizationProvider() throws FalconException {
-        String providerClassName = StartupProperties.get().getProperty(
-                "falcon.security.authorization.provider",
-                "org.apache.falcon.security.DefaultAuthorizationProvider");
-        return ReflectionUtils.getInstanceByClassName(providerClassName);
-    }
-
-    public static void tryProxy(Entity entity, final String doAsUser) throws IOException, FalconException {
-        if (entity != null && entity.getACL() != null && SecurityUtil.isAuthorizationEnabled()) {
-            final String aclOwner = entity.getACL().getOwner();
-            final String aclGroup = entity.getACL().getGroup();
-
-            if (StringUtils.isNotEmpty(doAsUser)) {
-                if (!doAsUser.equalsIgnoreCase(aclOwner)) {
-                    LOG.warn("doAs user {} not same as acl owner {}. Ignoring acl owner.", doAsUser, aclOwner);
-                    throw new FalconException("doAs user and ACL owner mismatch. doAs user " + doAsUser
-                            +  " should be same as ACL owner " + aclOwner);
-                }
-                return;
-            }
-            if (SecurityUtil.getAuthorizationProvider().shouldProxy(
-                    CurrentUser.getAuthenticatedUGI(), aclOwner, aclGroup)) {
-                CurrentUser.proxy(aclOwner, aclGroup);
-            }
-        }
-    }
-}

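SecurityUtil only inspects a handful of startup.properties keys. A sketch of the entries that turn on Kerberos authentication and entity-level authorization with the default provider (the values shown are examples):

    *.falcon.authentication.type=kerberos
    *.falcon.security.authorization.enabled=true
    *.falcon.security.authorization.provider=org.apache.falcon.security.DefaultAuthorizationProvider
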
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/ConfigurationChangeListener.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/ConfigurationChangeListener.java b/common/src/main/java/org/apache/falcon/service/ConfigurationChangeListener.java
deleted file mode 100644
index e20b0b5..0000000
--- a/common/src/main/java/org/apache/falcon/service/ConfigurationChangeListener.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.Entity;
-
-/**
- * Configuration change notification listener.
- */
-public interface ConfigurationChangeListener {
-
-    /**
-     * Invoked when a new entity is added to the store.
-     *
-     * @param entity entity object
-     * @throws FalconException
-     */
-    void onAdd(Entity entity) throws FalconException;
-
-    /**
-     * Invoked when an existing entity is removed from the store.
-     *
-     * @param entity entity object
-     * @throws FalconException
-     */
-    void onRemove(Entity entity) throws FalconException;
-
-    /**
-     * Invoked when an existing entity in the store is updated.
-     *
-     * @param oldEntity old entity object
-     * @param newEntity updated entity object
-     * @throws FalconException
-     */
-    void onChange(Entity oldEntity, Entity newEntity) throws FalconException;
-
-    /**
-     * This is when existing entities are read from the store during startup.
-     *
-     * @param entity entity object
-     * @throws FalconException
-     */
-    void onReload(Entity entity) throws FalconException;
-}

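A minimal implementation sketch of the listener interface above, one that only logs each callback; how such a listener gets registered with the configuration store is omitted here:

    import org.apache.falcon.FalconException;
    import org.apache.falcon.entity.v0.Entity;
    import org.apache.falcon.service.ConfigurationChangeListener;

    public class LoggingChangeListenerSketch implements ConfigurationChangeListener {

        @Override
        public void onAdd(Entity entity) throws FalconException {
            System.out.println("Added entity " + entity.getName());
        }

        @Override
        public void onRemove(Entity entity) throws FalconException {
            System.out.println("Removed entity " + entity.getName());
        }

        @Override
        public void onChange(Entity oldEntity, Entity newEntity) throws FalconException {
            System.out.println("Updated entity " + oldEntity.getName() + " -> " + newEntity.getName());
        }

        @Override
        public void onReload(Entity entity) throws FalconException {
            System.out.println("Reloaded entity " + entity.getName());
        }
    }
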
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/FalconService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/FalconService.java b/common/src/main/java/org/apache/falcon/service/FalconService.java
deleted file mode 100644
index a1eb8e0..0000000
--- a/common/src/main/java/org/apache/falcon/service/FalconService.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Falcon service initialized at startup.
- */
-public interface FalconService {
-
-    String getName();
-
-    void init() throws FalconException;
-
-    void destroy() throws FalconException;
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/GroupsService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/GroupsService.java b/common/src/main/java/org/apache/falcon/service/GroupsService.java
deleted file mode 100644
index dd4d946..0000000
--- a/common/src/main/java/org/apache/falcon/service/GroupsService.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.Groups;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * The GroupsService class delegates to Hadoop's <code>org.apache.hadoop.security.Groups</code>
- * to retrieve the groups a user belongs to.
- */
-public class GroupsService implements FalconService {
-    private org.apache.hadoop.security.Groups hGroups;
-
-    public static final String SERVICE_NAME = GroupsService.class.getSimpleName();
-
-    /**
-     * Initializes the service.
-     */
-    @Override
-    public void init() {
-        hGroups = new Groups(new Configuration(true));
-    }
-
-    /**
-     * Destroys the service.
-     */
-    @Override
-    public void destroy() {
-    }
-
-    @Override
-    public String getName() {
-        return SERVICE_NAME;
-    }
-
-    /**
-     * Returns the list of groups a user belongs to.
-     *
-     * @param user user name.
-     * @return the groups the given user belongs to.
-     * @throws java.io.IOException thrown if there was an error retrieving the groups of the user.
-     */
-    public List<String> getGroups(String user) throws IOException {
-        return hGroups.getGroups(user);
-    }
-
-}

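Usage of the service above is a straight delegation: initialize once, then look up a user's groups. A short sketch with a hypothetical user name:

    import java.util.List;

    import org.apache.falcon.service.GroupsService;

    public class GroupsLookupSketch {
        public static void main(String[] args) throws Exception {
            GroupsService groupsService = new GroupsService();
            groupsService.init();   // builds Hadoop's Groups from the default Configuration
            List<String> groups = groupsService.getGroups("falcon-user");   // hypothetical user
            System.out.println("Groups: " + groups);
        }
    }
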
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/LifecyclePolicyMap.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/LifecyclePolicyMap.java b/common/src/main/java/org/apache/falcon/service/LifecyclePolicyMap.java
deleted file mode 100644
index b8c979e..0000000
--- a/common/src/main/java/org/apache/falcon/service/LifecyclePolicyMap.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.lifecycle.FeedLifecycleStage;
-import org.apache.falcon.lifecycle.LifecyclePolicy;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.util.StartupProperties;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Stores all internal and external feed lifecycle policies.
- */
-public final class LifecyclePolicyMap implements FalconService {
-    private static final Logger LOG = LoggerFactory.getLogger(LifecyclePolicyMap.class);
-    private static final LifecyclePolicyMap STORE = new LifecyclePolicyMap();
-
-    private final Map<String, LifecyclePolicy> policyMap = new HashMap<>();
-
-    private LifecyclePolicyMap() {}
-
-    public static LifecyclePolicyMap get() {
-        return STORE;
-    }
-
-    public LifecyclePolicy get(String policyName) {
-        return policyMap.get(policyName);
-    }
-
-    @Override
-    public String getName() {
-        return getClass().getSimpleName();
-    }
-
-    @Override
-    public void init() throws FalconException {
-        String[] policyNames = StartupProperties.get().getProperty("falcon.feed.lifecycle.policies").split(",");
-        for (String name : policyNames) {
-            LifecyclePolicy policy = ReflectionUtils.getInstanceByClassName(name);
-            LOG.debug("Loaded policy : {} for stage : {}", policy.getName(), policy.getStage());
-            policyMap.put(policy.getName(), policy);
-        }
-        validate();
-    }
-
-    @Override
-    public void destroy() throws FalconException {
-        policyMap.clear();
-    }
-
-    // validate that default policy for each stage is available
-    private void validate() throws FalconException {
-        for (FeedLifecycleStage stage : FeedLifecycleStage.values()) {
-            if (!policyMap.containsKey(stage.getDefaultPolicyName())) {
-                throw new FalconException("Default Policy: " + stage.getDefaultPolicyName()
-                        + " for stage: " + stage.name() + "was not found.");
-            }
-        }
-    }
-}

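The map above is populated from a single comma-separated startup property listing the policy implementation classes, and each lifecycle stage must end up with its default policy present. A sketch of that entry (the class name shown is illustrative, not necessarily the shipped default):

    *.falcon.feed.lifecycle.policies=org.apache.falcon.lifecycle.retention.AgeBasedDelete
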
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/LogCleanupService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/LogCleanupService.java b/common/src/main/java/org/apache/falcon/service/LogCleanupService.java
deleted file mode 100644
index 9962102..0000000
--- a/common/src/main/java/org/apache/falcon/service/LogCleanupService.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.service;
-
-import java.util.Date;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import javax.servlet.jsp.el.ELException;
-import javax.servlet.jsp.el.ExpressionEvaluator;
-
-import org.apache.commons.el.ExpressionEvaluatorImpl;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.aspect.GenericAlert;
-import org.apache.falcon.cleanup.AbstractCleanupHandler;
-import org.apache.falcon.cleanup.FeedCleanupHandler;
-import org.apache.falcon.cleanup.ProcessCleanupHandler;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.util.StartupProperties;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Log cleanup service.
- */
-public class LogCleanupService implements FalconService {
-
-    private static final Logger LOG = LoggerFactory.getLogger(LogCleanupService.class);
-    private final ExpressionEvaluator evaluator = new ExpressionEvaluatorImpl();
-    private final ExpressionHelper resolver = ExpressionHelper.get();
-
-    @Override
-    public String getName() {
-        return "Falcon Log cleanup service";
-    }
-
-    @Override
-    public void init() throws FalconException {
-        Timer timer = new Timer();
-        timer.schedule(new CleanupThread(), 0, getDelay());
-        LOG.info("Falcon log cleanup service initialized");
-    }
-
-    private static class CleanupThread extends TimerTask {
-
-        private final AbstractCleanupHandler processCleanupHandler = new ProcessCleanupHandler();
-        private final AbstractCleanupHandler feedCleanupHandler = new FeedCleanupHandler();
-
-        @Override
-        public void run() {
-            try {
-                LOG.info("Cleaning up logs at: {}", new Date());
-                processCleanupHandler.cleanup();
-                feedCleanupHandler.cleanup();
-            } catch (Throwable t) {
-                LOG.error("Error in cleanup task: ", t);
-                GenericAlert.alertLogCleanupServiceFailed(
-                        "Exception in log cleanup service", t);
-            }
-        }
-    }
-
-    @Override
-    public void destroy() throws FalconException {
-        LOG.info("Falcon log cleanup service destroyed");
-    }
-
-    private long getDelay() throws FalconException {
-        String delay = StartupProperties.get().getProperty(
-                "falcon.cleanup.service.frequency", "days(1)");
-        try {
-            return (Long) evaluator.evaluate("${" + delay + "}", Long.class,
-                    resolver, resolver);
-        } catch (ELException e) {
-            throw new FalconException("Exception in EL evaluation", e);
-        }
-    }
-}

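The cleanup timer above derives its period from one startup property evaluated as a Falcon EL duration; the deleted code falls back to days(1) when it is unset. An example override:

    *.falcon.cleanup.service.frequency=hours(6)
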
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/ProxyUserService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/ProxyUserService.java b/common/src/main/java/org/apache/falcon/service/ProxyUserService.java
deleted file mode 100644
index 364c750..0000000
--- a/common/src/main/java/org/apache/falcon/service/ProxyUserService.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.util.RuntimeProperties;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.security.AccessControlException;
-import java.text.MessageFormat;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The ProxyUserService checks if a user of a request has proxyuser privileges.
- * <p>
- * This check is based on the following criteria:
- * <p>
- * <ul>
- *     <li>The user of the request must be configured as proxy user in Falcon runtime properties.</li>
- *     <li>The user of the request must be making the request from a whitelisted host.</li>
- *     <li>The user of the request must be making the request on behalf of a user of a whitelisted group.</li>
- * </ul>
- * <p>
- */
-public class ProxyUserService implements FalconService {
-    private static final Logger LOG = LoggerFactory.getLogger(ProxyUserService.class);
-
-
-    private Map<String, Set<String>> proxyUserHosts = new HashMap<>();
-    private Map<String, Set<String>> proxyUserGroups = new HashMap<>();
-
-    private static final String CONF_PREFIX = "falcon.service.ProxyUserService.proxyuser.";
-    private static final String GROUPS = ".groups";
-    private static final String HOSTS = ".hosts";
-    public static final String SERVICE_NAME = ProxyUserService.class.getSimpleName();
-
-    @Override
-    public String getName() {
-        return SERVICE_NAME;
-    }
-
-    /**
-     * Initializes the service.
-     * @throws FalconException thrown if the service could not be configured correctly.
-     */
-    @Override
-    public void init() throws FalconException {
-        Set<Map.Entry<Object, Object>> entrySet = RuntimeProperties.get().entrySet();
-
-        for (Map.Entry<Object, Object> entry : entrySet) {
-            String key = (String) entry.getKey();
-
-            if (key.startsWith(CONF_PREFIX) && key.endsWith(GROUPS)) {
-                String proxyUser = key.substring(0, key.lastIndexOf(GROUPS));
-                if (RuntimeProperties.get().getProperty(proxyUser + HOSTS) == null) {
-                    throw new FalconException(proxyUser + HOSTS + " property not set in runtime "
-                            + "properties. Please add it.");
-                }
-                proxyUser = proxyUser.substring(CONF_PREFIX.length());
-                String value = ((String) entry.getValue()).trim();
-                LOG.info("Loading proxyuser settings [{}]=[{}]", key, value);
-                Set<String> values = null;
-                if (!value.equals("*")) {
-                    values = new HashSet<>(Arrays.asList(value.split(",")));
-                }
-                proxyUserGroups.put(proxyUser, values);
-            }
-            if (key.startsWith(CONF_PREFIX) && key.endsWith(HOSTS)) {
-                String proxyUser = key.substring(0, key.lastIndexOf(HOSTS));
-                if (RuntimeProperties.get().getProperty(proxyUser + GROUPS) == null) {
-                    throw new FalconException(proxyUser + GROUPS + " property not set in runtime "
-                            + "properties. Please add it.");
-                }
-                proxyUser = proxyUser.substring(CONF_PREFIX.length());
-                String value = ((String) entry.getValue()).trim();
-                LOG.info("Loading proxyuser settings [{}]=[{}]", key, value);
-                Set<String> values = null;
-                if (!value.equals("*")) {
-                    String[] hosts = value.split(",");
-                    for (int i = 0; i < hosts.length; i++) {
-                        String hostName = hosts[i];
-                        try {
-                            hosts[i] = normalizeHostname(hostName);
-                        } catch (Exception ex) {
-                            throw new FalconException("Exception normalizing host name: " + hostName + "."
-                                    + ex.getMessage(), ex);
-                        }
-                        LOG.info("Hostname, original [{}], normalized [{}]", hostName, hosts[i]);
-                    }
-                    values = new HashSet<>(Arrays.asList(hosts));
-                }
-                proxyUserHosts.put(proxyUser, values);
-            }
-        }
-    }
-
-    /**
-     * Verifies a proxyuser.
-     *
-     * @param proxyUser user name of the proxy user.
-     * @param proxyHost host the proxy user is making the request from.
-     * @param doAsUser user the proxy user is impersonating.
-     * @throws java.io.IOException thrown if an error during the validation has occurred.
-     * @throws java.security.AccessControlException thrown if the user is not allowed to perform the proxyuser request.
-     */
-    public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException {
-        validateNotEmpty(proxyUser, "proxyUser",
-                "If you're attempting to use user-impersonation via a proxy user, please make sure that "
-                        + "falcon.service.ProxyUserService.proxyuser.#USER#.hosts and "
-                        + "falcon.service.ProxyUserService.proxyuser.#USER#.groups are configured correctly"
-        );
-        validateNotEmpty(proxyHost, "proxyHost",
-                "If you're attempting to use user-impersonation via a proxy user, please make sure that "
-                        + "falcon.service.ProxyUserService.proxyuser." + proxyUser + ".hosts and "
-                        + "falcon.service.ProxyUserService.proxyuser." + proxyUser + ".groups are configured correctly"
-        );
-        validateNotEmpty(doAsUser, "doAsUser", null);
-        LOG.debug("Authorization check proxyuser [{}] host [{}] doAs [{}]",
-                proxyUser, proxyHost, doAsUser);
-        if (proxyUserHosts.containsKey(proxyUser)) {
-            validateRequestorHost(proxyUser, proxyHost, proxyUserHosts.get(proxyUser));
-            validateGroup(proxyUser, doAsUser, proxyUserGroups.get(proxyUser));
-        } else {
-            throw new AccessControlException(MessageFormat.format("User [{0}] not defined as proxyuser. Please add it"
-                            + " to runtime properties.", proxyUser));
-        }
-    }
-
-    private void validateRequestorHost(String proxyUser, String hostname, Set<String> validHosts)
-        throws IOException {
-        if (validHosts != null) {
-            if (!validHosts.contains(hostname) && !validHosts.contains(normalizeHostname(hostname))) {
-                throw new AccessControlException(MessageFormat.format("Unauthorized host [{0}] for proxyuser [{1}]",
-                        hostname, proxyUser));
-            }
-        }
-    }
-
-    private void validateGroup(String proxyUser, String user, Set<String> validGroups) throws IOException {
-        if (validGroups != null) {
-            List<String> userGroups =  Services.get().<GroupsService>getService(GroupsService.SERVICE_NAME)
-            .getGroups(user);
-            for (String g : validGroups) {
-                if (userGroups.contains(g)) {
-                    return;
-                }
-            }
-            throw new AccessControlException(
-                    MessageFormat.format("Unauthorized proxyuser [{0}] for user [{1}], not in proxyuser groups",
-                            proxyUser, user));
-        }
-    }
-
-    private String normalizeHostname(String name) {
-        try {
-            InetAddress address = InetAddress.getByName(name);
-            return address.getCanonicalHostName();
-        }  catch (IOException ex) {
-            throw new AccessControlException(MessageFormat.format("Could not resolve host [{0}], [{1}]", name,
-                    ex.getMessage()));
-        }
-    }
-
-    private static void validateNotEmpty(String str, String name, String info) {
-        if (StringUtils.isBlank(str)) {
-            throw new IllegalArgumentException(name + " cannot be null or empty" + (info == null ? "" : ", " + info));
-        }
-    }
-
-    /**
-     * Destroys the service.
-     */
-    @Override
-    public void destroy() {
-    }
-
-}
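
A minimal sketch of how this service is configured and exercised, assuming an
illustrative proxy user "oozie" (the host and group names below are made up; only
the key pattern comes from the class above):

    import org.apache.falcon.service.ProxyUserService;
    import org.apache.falcon.service.Services;

    public final class ProxyUserValidationSketch {

        // Assumed runtime.properties entries (illustrative values):
        //   *.falcon.service.ProxyUserService.proxyuser.oozie.hosts=gateway.example.com
        //   *.falcon.service.ProxyUserService.proxyuser.oozie.groups=etl
        public static void main(String[] args) throws Exception {
            ProxyUserService proxyUserService = new ProxyUserService();
            proxyUserService.init();                   // loads the proxyuser.* keys above
            Services.get().register(proxyUserService);

            // A GroupsService must also be registered so the group check can resolve
            // the doAs user's groups; it is omitted here for brevity.
            // Throws java.security.AccessControlException unless "oozie" may impersonate
            // "analyst" from gateway.example.com and "analyst" belongs to group "etl".
            proxyUserService.validate("oozie", "gateway.example.com", "analyst");
        }
    }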

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/ServiceInitializer.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/ServiceInitializer.java b/common/src/main/java/org/apache/falcon/service/ServiceInitializer.java
deleted file mode 100644
index 4708b94..0000000
--- a/common/src/main/java/org/apache/falcon/service/ServiceInitializer.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.util.StartupProperties;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Initializer that Falcon uses at startup to bring up all the configured Falcon services.
- */
-public class ServiceInitializer {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ServiceInitializer.class);
-    private final Services services = Services.get();
-
-    public void initialize() throws FalconException {
-        String serviceClassNames = StartupProperties.get().
-                getProperty("application.services", "org.apache.falcon.entity.store.ConfigurationStore");
-        for (String serviceClassName : serviceClassNames.split(",")) {
-            serviceClassName = serviceClassName.trim();
-            if (serviceClassName.isEmpty()) {
-                continue;
-            }
-            FalconService service = ReflectionUtils.getInstanceByClassName(serviceClassName);
-            services.register(service);
-            LOG.info("Initializing service: {}", serviceClassName);
-            try {
-                service.init();
-            } catch (Throwable t) {
-                LOG.error("Failed to initialize service {}", serviceClassName, t);
-                throw new FalconException(t);
-            }
-            LOG.info("Service initialized: {}", serviceClassName);
-        }
-    }
-
-    public void destroy() throws FalconException {
-        for (FalconService service : services) {
-            LOG.info("Destroying service: {}", service.getClass().getName());
-            try {
-                service.destroy();
-            } catch (Throwable t) {
-                LOG.error("Failed to destroy service {}", service.getClass().getName(), t);
-                throw new FalconException(t);
-            }
-            LOG.info("Service destroyed: {}", service.getClass().getName());
-        }
-    }
-}
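
The initializer simply walks the comma-separated "application.services" startup
property in order. A hedged sketch of the matching startup.properties entry and the
bootstrap calls (the service list is illustrative; only ConfigurationStore is the
built-in default):

    import org.apache.falcon.service.ServiceInitializer;

    public final class FalconBootstrapSketch {

        // Assumed startup.properties entry (illustrative):
        //   *.application.services=org.apache.falcon.entity.store.ConfigurationStore,\
        //                          org.apache.falcon.service.ProxyUserService
        public static void main(String[] args) throws Exception {
            ServiceInitializer starter = new ServiceInitializer();
            starter.initialize();    // registers each listed class and calls init() in order
            try {
                // ... the server does its work here ...
            } finally {
                starter.destroy();   // calls destroy() on every registered service
            }
        }
    }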

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/service/Services.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/service/Services.java b/common/src/main/java/org/apache/falcon/service/Services.java
deleted file mode 100644
index 6659ccd..0000000
--- a/common/src/main/java/org/apache/falcon/service/Services.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.util.ReflectionUtils;
-
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-/**
- * Repository of services initialized at startup.
- */
-public final class Services implements Iterable<FalconService> {
-
-    private static final Services INSTANCE = new Services();
-
-    private Services() {
-    }
-
-    public static Services get() {
-        return INSTANCE;
-    }
-
-    private final Map<String, FalconService> services =
-            new LinkedHashMap<String, FalconService>();
-
-    public synchronized void register(FalconService service)
-        throws FalconException {
-
-        if (services.containsKey(service.getName())) {
-            throw new FalconException("Service " + service.getName() + " already registered");
-        } else {
-            services.put(service.getName(), service);
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    public <T extends FalconService> T getService(String serviceName) {
-        if (services.containsKey(serviceName)) {
-            return (T) services.get(serviceName);
-        } else {
-            throw new NoSuchElementException("Service " + serviceName + " not registered with registry");
-        }
-    }
-
-    public boolean isRegistered(String serviceName) {
-        return services.containsKey(serviceName);
-    }
-
-    @Override
-    public Iterator<FalconService> iterator() {
-        return services.values().iterator();
-    }
-
-    public FalconService init(String serviceName) throws FalconException {
-        if (isRegistered(serviceName)) {
-            throw new FalconException("Service is already initialized " + serviceName);
-        }
-        FalconService service = ReflectionUtils.getInstance(serviceName + ".impl");
-        register(service);
-        return service;
-    }
-
-    public void reset() {
-        services.clear();
-    }
-}
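
Services is the process-wide registry the initializer fills; lookup is by the
service's name. A small sketch using ProxyUserService, whose SERVICE_NAME constant
appears earlier in this commit:

    import org.apache.falcon.service.ProxyUserService;
    import org.apache.falcon.service.Services;

    public final class ServiceLookupSketch {

        public static ProxyUserService lookupProxyUserService() {
            // getService() throws NoSuchElementException for unknown names, so guard first.
            if (!Services.get().isRegistered(ProxyUserService.SERVICE_NAME)) {
                return null;
            }
            return Services.get().getService(ProxyUserService.SERVICE_NAME);
        }
    }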

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/update/UpdateHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/update/UpdateHelper.java b/common/src/main/java/org/apache/falcon/update/UpdateHelper.java
deleted file mode 100644
index 6603bc6..0000000
--- a/common/src/main/java/org/apache/falcon/update/UpdateHelper.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.update;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.ProcessHelper;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Date;
-
-/**
- * Helper methods to facilitate entity updates.
- */
-public final class UpdateHelper {
-    private static final Logger LOG = LoggerFactory.getLogger(UpdateHelper.class);
-
-    private static final String[] FEED_FIELDS = new String[]{"partitions", "groups", "lateArrival.cutOff",
-                                                             "schema.location", "schema.provider", "tags",
-                                                             "group", "owner", "permission", };
-    private static final String[] PROCESS_FIELDS = new String[]{"retry.policy", "retry.delay", "retry.attempts",
-                                                                "lateProcess.policy", "lateProcess.delay",
-                                                                "lateProcess.lateInputs[\\d+].input",
-                                                                "lateProcess.lateInputs[\\d+].workflowPath",
-                                                                "owner", "group", "permission", "tags",
-                                                                "pipelines", };
-
-    private UpdateHelper() {}
-
-    public static boolean isEntityUpdated(Entity oldEntity, Entity newEntity, String cluster,
-        Path oldStagingPath) throws FalconException {
-        Entity oldView = EntityUtil.getClusterView(oldEntity, cluster);
-        Entity newView = EntityUtil.getClusterView(newEntity, cluster);
-
-        //staging path contains md5 of the cluster view of entity
-        String[] parts = oldStagingPath.getName().split("_");
-        if (parts[0].equals(EntityUtil.md5(newView))) {
-            return false;
-        }
-
-        switch (oldEntity.getEntityType()) {
-        case FEED:
-            return !EntityUtil.equals(oldView, newView, FEED_FIELDS);
-
-        case PROCESS:
-            return !EntityUtil.equals(oldView, newView, PROCESS_FIELDS);
-
-        default:
-        }
-        throw new IllegalArgumentException("Unhandled entity type " + oldEntity.getEntityType());
-    }
-
-    public static boolean shouldUpdate(Entity oldEntity, Entity newEntity, Entity affectedEntity, String cluster)
-        throws FalconException {
-        if (oldEntity.getEntityType() == EntityType.FEED && affectedEntity.getEntityType() == EntityType.PROCESS) {
-
-            Feed oldFeed = (Feed) oldEntity;
-            Feed newFeed = (Feed) newEntity;
-            Process affectedProcess = (Process) affectedEntity;
-
-            //check if affectedProcess is defined for this cluster
-            Cluster processCluster = ProcessHelper.getCluster(affectedProcess, cluster);
-            if (processCluster == null) {
-                LOG.debug("Process {} is not defined for cluster {}. Skipping", affectedProcess.getName(), cluster);
-                return false;
-            }
-
-            if (processCluster.getValidity().getEnd().before(new Date())) {
-                LOG.debug("Process {} validity {} is in the past. Skipping...", affectedProcess.getName(),
-                    processCluster.getValidity().getEnd());
-                return false;
-            }
-
-            if (!oldFeed.getFrequency().equals(newFeed.getFrequency())) {
-                LOG.debug("{}: Frequency has changed. Updating...", oldFeed.toShortString());
-                return true;
-            }
-
-            if (!StringUtils.equals(oldFeed.getAvailabilityFlag(), newFeed.getAvailabilityFlag())) {
-                LOG.debug("{}: Availability flag has changed. Updating...", oldFeed.toShortString());
-                return true;
-            }
-
-            org.apache.falcon.entity.v0.feed.Cluster oldFeedCluster = FeedHelper.getCluster(oldFeed, cluster);
-            org.apache.falcon.entity.v0.feed.Cluster newFeedCluster = FeedHelper.getCluster(newFeed, cluster);
-            if (!oldFeedCluster.getValidity().getStart().equals(newFeedCluster.getValidity().getStart())) {
-                LOG.debug("{}: Start time for cluster {} has changed. Updating...", oldFeed.toShortString(), cluster);
-                return true;
-            }
-
-            Storage oldFeedStorage = FeedHelper.createStorage(cluster, oldFeed);
-            Storage newFeedStorage = FeedHelper.createStorage(cluster, newFeed);
-
-            if (!oldFeedStorage.isIdentical(newFeedStorage)) {
-                LOG.debug("{}: Storage has changed. Updating...", oldFeed.toShortString());
-                return true;
-            }
-            return false;
-
-        } else {
-            LOG.debug(newEntity.toShortString());
-            LOG.debug(affectedEntity.toShortString());
-            throw new FalconException("Don't know what to do. Unexpected scenario");
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/ApplicationProperties.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/ApplicationProperties.java b/common/src/main/java/org/apache/falcon/util/ApplicationProperties.java
deleted file mode 100644
index adf09c4..0000000
--- a/common/src/main/java/org/apache/falcon/util/ApplicationProperties.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-import java.util.HashSet;
-import java.util.Properties;
-import java.util.Set;
-
-/**
- * Base class for reading application properties.
- */
-public abstract class ApplicationProperties extends Properties {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ApplicationProperties.class);
-
-    protected abstract String getPropertyFile();
-
-    protected String domain;
-
-    protected ApplicationProperties() throws FalconException {
-        init();
-    }
-
-    protected void init() throws FalconException {
-        setDomain(System.getProperty("falcon.domain", System.getenv("FALCON_DOMAIN")));
-        loadProperties();
-    }
-
-    protected void setDomain(String domain) {
-        this.domain = domain;
-    }
-
-    public String getDomain() {
-        return domain;
-    }
-
-    protected void loadProperties() throws FalconException {
-        String propertyFileName = getPropertyFile();
-        String confDir = System.getProperty("config.location");
-        loadProperties(propertyFileName, confDir);
-    }
-
-    /**
-     * Reads the given properties file, looking first in the directory given by the
-     * config.location system property and then falling back to the classpath.
-     *
-     * @throws FalconException if the file cannot be found or loaded.
-     */
-    protected void loadProperties(String propertyFileName, String confDir) throws FalconException {
-        try {
-            InputStream resourceAsStream = checkConfigLocation(propertyFileName, confDir);
-
-            //Fallback to classpath
-            if (resourceAsStream == null) {
-                resourceAsStream = checkClassPath(propertyFileName);
-            }
-
-            if (resourceAsStream != null) {
-                try {
-                    doLoadProperties(resourceAsStream);
-                    return;
-                } finally {
-                    IOUtils.closeQuietly(resourceAsStream);
-                }
-            }
-            throw new FileNotFoundException("Unable to find: " + propertyFileName);
-        } catch (IOException e) {
-            throw new FalconException("Error loading properties file: " + getPropertyFile(), e);
-        }
-    }
-
-    private InputStream checkConfigLocation(String propertyFileName, String confDir)
-        throws FileNotFoundException {
-
-        InputStream resourceAsStream = null;
-        if (confDir != null) {
-            File fileToLoad = new File(confDir, propertyFileName);
-            resourceAsStream = getResourceAsStream(fileToLoad);
-        }
-        return resourceAsStream;
-    }
-
-    protected InputStream getResourceAsStream(File fileToLoad) throws FileNotFoundException {
-        InputStream resourceAsStream = null;
-        if (fileToLoad.exists() && fileToLoad.isFile() && fileToLoad.canRead()) {
-            LOG.info("config.location is set, using: {}", fileToLoad.getAbsolutePath());
-            resourceAsStream = new FileInputStream(fileToLoad);
-        }
-        return resourceAsStream;
-    }
-
-    protected InputStream checkClassPath(String propertyFileName) {
-
-        InputStream resourceAsStream = null;
-        Class clazz = ApplicationProperties.class;
-        URL resource = clazz.getResource("/" + propertyFileName);
-        if (resource != null) {
-            LOG.info("Fallback to classpath for: {}", resource);
-            resourceAsStream = clazz.getResourceAsStream("/" + propertyFileName);
-        } else {
-            resource = clazz.getResource(propertyFileName);
-            if (resource != null) {
-                LOG.info("Fallback to classpath for: {}", resource);
-                resourceAsStream = clazz.getResourceAsStream(propertyFileName);
-            }
-        }
-        return resourceAsStream;
-    }
-
-    private void doLoadProperties(InputStream resourceAsStream) throws IOException, FalconException {
-        Properties origProps = new Properties();
-        origProps.load(resourceAsStream);
-        if (domain == null) {
-            domain = origProps.getProperty("*.domain");
-            if (domain == null) {
-                throw new FalconException("Domain is not set!");
-            } else {
-                domain = ExpressionHelper.substitute(domain);
-            }
-        }
-
-        LOG.info("Initializing {} properties with domain {}", this.getClass().getName(), domain);
-        Set<String> keys = getKeys(origProps.keySet());
-        for (String key : keys) {
-            String value = origProps.getProperty(domain + "." + key, origProps.getProperty("*." + key));
-            if (value != null) {
-                value = ExpressionHelper.substitute(value);
-                LOG.debug("{}={}", key, value);
-                put(key, value);
-            }
-        }
-    }
-
-    protected Set<String> getKeys(Set<Object> keySet) {
-        Set<String> keys = new HashSet<String>();
-        for (Object keyObj : keySet) {
-            String key = (String) keyObj;
-            keys.add(key.substring(key.indexOf('.') + 1));
-        }
-        return keys;
-    }
-
-    @Override
-    public String getProperty(String key) {
-        return StringUtils.trim(super.getProperty(key));
-    }
-
-    @Override
-    public String getProperty(String key, String defaultValue) {
-        return StringUtils.trim(super.getProperty(key, defaultValue));
-    }
-}
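
Every key is resolved through the configured domain: a "<domain>.<key>" entry wins
over the "*.<key>" default, and the domain itself comes from the falcon.domain system
property, the FALCON_DOMAIN environment variable, or the file's "*.domain" entry. A
sketch of a concrete subclass (the file name and keys are illustrative):

    import org.apache.falcon.FalconException;
    import org.apache.falcon.util.ApplicationProperties;

    public final class MyModuleProperties extends ApplicationProperties {

        public MyModuleProperties() throws FalconException {
            super();   // resolves the domain, then loads from config.location or the classpath
        }

        @Override
        protected String getPropertyFile() {
            return "my-module.properties";
        }

        // With a file containing
        //     *.domain=prod
        //     *.retention.days=7
        //     prod.retention.days=30
        // getProperty("retention.days") returns "30": the domain resolves to "prod",
        // so the "prod." entry shadows the "*." default.
    }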

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/BuildProperties.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/BuildProperties.java b/common/src/main/java/org/apache/falcon/util/BuildProperties.java
deleted file mode 100644
index 339dcb5..0000000
--- a/common/src/main/java/org/apache/falcon/util/BuildProperties.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Application build info properties are exposed through this.
- */
-public final class BuildProperties extends ApplicationProperties {
-    private static final String PROPERTY_FILE = "falcon-buildinfo.properties";
-
-    private static final AtomicReference<BuildProperties> INSTANCE =
-            new AtomicReference<BuildProperties>();
-
-    private BuildProperties() throws FalconException {
-        super();
-    }
-
-    @Override
-    protected String getPropertyFile() {
-        return PROPERTY_FILE;
-    }
-
-    public static Properties get() {
-        try {
-            if (INSTANCE.get() == null) {
-                INSTANCE.compareAndSet(null, new BuildProperties());
-            }
-            return INSTANCE.get();
-        } catch (FalconException e) {
-            throw new RuntimeException("Unable to read application "
-                + "falcon build information properties", e);
-        }
-    }
-}
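
Typical read access is just the singleton getter; the concrete keys come from the
generated falcon-buildinfo.properties file, so the key name below is illustrative:

    import java.util.Properties;

    import org.apache.falcon.util.BuildProperties;

    public final class BuildInfoSketch {
        public static String buildVersion() {
            Properties buildInfo = BuildProperties.get();
            // "build.version" is an assumed key; use whatever keys the build writes.
            return buildInfo.getProperty("build.version");
        }
    }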

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/DateUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/DateUtil.java b/common/src/main/java/org/apache/falcon/util/DateUtil.java
deleted file mode 100644
index baf5b13..0000000
--- a/common/src/main/java/org/apache/falcon/util/DateUtil.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.util;
-
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.Frequency;
-
-import java.util.Calendar;
-import java.util.Date;
-import java.util.TimeZone;
-
-/**
- * Helper for date operations.
- */
-public final class DateUtil {
-
-    private static final long MINUTE_IN_MS = 60 * 1000L;
-    private static final long HOUR_IN_MS = 60 * MINUTE_IN_MS;
-    private static final long DAY_IN_MS = 24 * HOUR_IN_MS;
-    private static final long MONTH_IN_MS = 31 * DAY_IN_MS;
-
-    // Corresponds to Friday, April 16, 9999 7:12:55 AM UTC.
-    public static final Date NEVER = new Date(Long.parseLong("253379862775000"));
-
-    public static final long HOUR_IN_MILLIS = 60 * 60 * 1000;
-
-    private DateUtil() {}
-
-    public static Date getNextMinute(Date time) throws Exception {
-        Calendar insCal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
-        insCal.setTime(time);
-        insCal.add(Calendar.MINUTE, 1);
-        return insCal.getTime();
-
-    }
-
-    public static String getDateFormatFromTime(long milliSeconds) {
-        return SchemaHelper.getDateFormat().format((new Date(milliSeconds)));
-    }
-
-    /**
-     * Converts a frequency into an approximate duration in milliseconds, treating a month as 31 days.
-     * Do not use this for scheduling-related computations, as the approximation can cause correctness issues there.
-     * @param frequency the frequency to convert.
-     * @return the approximate duration in milliseconds, or null for an unsupported time unit.
-     */
-    public static Long getFrequencyInMillis(Frequency frequency){
-        switch (frequency.getTimeUnit()) {
-
-        case months:
-            return MONTH_IN_MS * frequency.getFrequencyAsInt();
-
-        case days:
-            return DAY_IN_MS * frequency.getFrequencyAsInt();
-
-        case hours:
-            return HOUR_IN_MS * frequency.getFrequencyAsInt();
-
-        case minutes:
-            return MINUTE_IN_MS * frequency.getFrequencyAsInt();
-
-        default:
-            return null;
-        }
-    }
-
-    /**
-     * Returns the current time, with seconds and milliseconds reset to 0.
-     * @return
-     */
-    public static Date now() {
-        Calendar cal = Calendar.getInstance();
-        cal.set(Calendar.SECOND, 0);
-        cal.set(Calendar.MILLISECOND, 0);
-        return cal.getTime();
-    }
-
-    /**
-     * Adds the supplied number of seconds to the given date and returns the new Date.
-     * @param date
-     * @param seconds
-     * @return
-     */
-    public static Date offsetTime(Date date, int seconds) {
-        return new Date(1000L * seconds + date.getTime());
-    }
-}
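
getFrequencyInMillis is only an approximation (a month is fixed at 31 days), which is
why its Javadoc warns against scheduling use. A small sketch, assuming Frequency's
string constructor accepts the usual "unit(n)" form:

    import org.apache.falcon.entity.v0.Frequency;
    import org.apache.falcon.util.DateUtil;

    public final class FrequencySketch {
        public static void main(String[] args) {
            Frequency sixHours = new Frequency("hours(6)");
            // 6 * 60 * 60 * 1000 = 21,600,000 ms
            Long millis = DateUtil.getFrequencyInMillis(sixHours);
            System.out.println(millis);
        }
    }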

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/DeploymentProperties.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/DeploymentProperties.java b/common/src/main/java/org/apache/falcon/util/DeploymentProperties.java
deleted file mode 100644
index 5879f30..0000000
--- a/common/src/main/java/org/apache/falcon/util/DeploymentProperties.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Application deployment properties, particularly whether the
- * server runs in embedded or distributed mode.
- */
-public final class DeploymentProperties extends ApplicationProperties {
-    private static final String PROPERTY_FILE = "deploy.properties";
-
-    private static final AtomicReference<DeploymentProperties> INSTANCE =
-            new AtomicReference<>();
-
-    private DeploymentProperties() throws FalconException {
-        super();
-    }
-
-    @Override
-    protected String getPropertyFile() {
-        return PROPERTY_FILE;
-    }
-
-    public static Properties get() {
-        try {
-            if (INSTANCE.get() == null) {
-                INSTANCE.compareAndSet(null, new DeploymentProperties());
-            }
-            return INSTANCE.get();
-        } catch (FalconException e) {
-            throw new RuntimeException("Unable to read application deployment properties", e);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/DeploymentUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/DeploymentUtil.java b/common/src/main/java/org/apache/falcon/util/DeploymentUtil.java
deleted file mode 100644
index 561520c..0000000
--- a/common/src/main/java/org/apache/falcon/util/DeploymentUtil.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.entity.ColoClusterRelation;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Helper methods for deployment properties.
- */
-public final class DeploymentUtil {
-    private static final Logger LOG = LoggerFactory.getLogger(DeploymentUtil.class);
-
-    protected static final String DEFAULT_COLO = "default";
-    protected static final String EMBEDDED = "embedded";
-    protected static final String DEPLOY_MODE = "deploy.mode";
-    private static final Set<String> DEFAULT_ALL_COLOS = new HashSet<String>();
-
-    protected static final String CURRENT_COLO;
-    protected static final boolean EMBEDDED_MODE;
-    private static boolean prism = false;
-
-    static {
-        DEFAULT_ALL_COLOS.add(DEFAULT_COLO);
-        EMBEDDED_MODE = DeploymentProperties.get().
-                getProperty(DEPLOY_MODE, EMBEDDED).equals(EMBEDDED);
-        if (EMBEDDED_MODE) {
-            CURRENT_COLO = DEFAULT_COLO;
-        } else {
-            CURRENT_COLO = StartupProperties.get().
-                    getProperty("current.colo", DEFAULT_COLO);
-        }
-        LOG.info("Running in embedded mode? {}", EMBEDDED_MODE);
-        LOG.info("Current colo: {}", CURRENT_COLO);
-    }
-
-    private DeploymentUtil() {}
-
-    public static void setPrismMode() {
-        prism = true;
-    }
-
-    public static boolean isPrism() {
-        return !EMBEDDED_MODE && prism;
-    }
-
-    public static String getCurrentColo() {
-        return CURRENT_COLO;
-    }
-
-    public static Set<String> getCurrentClusters() {
-        // return all clusters in embedded mode
-        if (EMBEDDED_MODE) {
-            Collection<String> allClusters = ConfigurationStore.get().getEntities(EntityType.CLUSTER);
-            Set<String> result = new HashSet<>(allClusters);
-            return result;
-        }
-        String colo = getCurrentColo();
-        return ColoClusterRelation.get().getClusters(colo);
-    }
-
-    public static boolean isEmbeddedMode() {
-        return EMBEDDED_MODE;
-    }
-
-    public static String getDefaultColo() {
-        return DEFAULT_COLO;
-    }
-
-    public static Set<String> getDefaultColos() {
-        DEFAULT_ALL_COLOS.add(DEFAULT_COLO);
-        return DEFAULT_ALL_COLOS;
-    }
-}
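
The mode is fixed at class-load time from deploy.properties: "embedded" (the default)
pins the colo to "default", while any other deploy.mode reads current.colo from
startup.properties. A sketch of a distributed setup (property values are illustrative):

    import java.util.Set;

    import org.apache.falcon.util.DeploymentUtil;

    public final class ColoSketch {

        // Assumed deploy.properties:    *.deploy.mode=distributed
        // Assumed startup.properties:   *.current.colo=us-east-1
        public static void describeColo() {
            if (!DeploymentUtil.isEmbeddedMode()) {
                String colo = DeploymentUtil.getCurrentColo();          // "us-east-1" here
                Set<String> clusters = DeploymentUtil.getCurrentClusters();
                System.out.println("Colo " + colo + " serves clusters " + clusters);
            }
        }
    }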


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java b/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
deleted file mode 100644
index 3a6d8c0..0000000
--- a/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
+++ /dev/null
@@ -1,403 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EntityBuilderTestUtil;
-import org.apache.falcon.entity.EntityNotRegisteredException;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.Collection;
-
-/**
- * Unit tests for DefaultAuthorizationProvider.
- */
-public class DefaultAuthorizationProviderTest {
-
-    public static final String CLUSTER_ENTITY_NAME = "primary-cluster";
-    public static final String PROCESS_ENTITY_NAME = "sample-process";
-
-    private UserGroupInformation realUser;
-    private ConfigurationStore configStore;
-    private Cluster clusterEntity;
-    private Feed feedEntity;
-    private org.apache.falcon.entity.v0.process.Process processEntity;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        realUser = UserGroupInformation.createUserForTesting(FalconTestUtil.TEST_USER_1, new String[]{"falcon", });
-
-        CurrentUser.authenticate(EntityBuilderTestUtil.USER);
-        org.testng.Assert.assertEquals(CurrentUser.getUser(), EntityBuilderTestUtil.USER);
-
-        configStore = ConfigurationStore.get();
-
-        addClusterEntity();
-        addFeedEntity();
-        addProcessEntity();
-        org.testng.Assert.assertNotNull(processEntity);
-    }
-
-    public void addClusterEntity() throws Exception {
-        clusterEntity = EntityBuilderTestUtil.buildCluster(CLUSTER_ENTITY_NAME);
-        configStore.publish(EntityType.CLUSTER, clusterEntity);
-    }
-
-    public void addFeedEntity() throws Exception {
-        feedEntity = EntityBuilderTestUtil.buildFeed("sample-feed", clusterEntity,
-                "classified-as=Secure", "analytics");
-        addStorage(feedEntity, Storage.TYPE.FILESYSTEM, "/falcon/impression-feed/${YEAR}/${MONTH}/${DAY}");
-        configStore.publish(EntityType.FEED, feedEntity);
-    }
-
-    private static void addStorage(Feed feed, Storage.TYPE storageType, String uriTemplate) {
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            Locations locations = new Locations();
-            feed.setLocations(locations);
-
-            Location location = new Location();
-            location.setType(LocationType.DATA);
-            location.setPath(uriTemplate);
-            feed.getLocations().getLocations().add(location);
-        } else {
-            CatalogTable table = new CatalogTable();
-            table.setUri(uriTemplate);
-            feed.setTable(table);
-        }
-    }
-
-    public void addProcessEntity() throws Exception {
-        processEntity = EntityBuilderTestUtil.buildProcess(PROCESS_ENTITY_NAME,
-                clusterEntity, "classified-as=Critical");
-        EntityBuilderTestUtil.addProcessWorkflow(processEntity);
-        EntityBuilderTestUtil.addProcessACL(processEntity);
-
-        configStore.publish(EntityType.PROCESS, processEntity);
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        cleanupStore();
-    }
-
-    protected void cleanupStore() throws FalconException {
-        configStore = ConfigurationStore.get();
-        for (EntityType type : EntityType.values()) {
-            Collection<String> entities = configStore.getEntities(type);
-            for (String entity : entities) {
-                configStore.remove(type, entity);
-            }
-        }
-    }
-
-    @Test
-    public void testAuthorizeAdminResourceVersionAction() throws Exception {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "blah", realUser, new String[]{"blah-group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("admin", "version", null, null, proxyUgi);
-    }
-
-    @Test
-    public void testAuthorizeSuperUser() throws Exception {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                EntityBuilderTestUtil.USER, realUser, new String[]{"group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "schedule", "feed", feedEntity.getName(), proxyUgi);
-        provider.authorizeResource("instance", "status", "feed", feedEntity.getName(), proxyUgi);
-    }
-
-    @Test
-    public void testAuthorizeSuperUserGroup() throws Exception {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "blah", realUser, new String[]{"falcon", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "schedule", "feed", feedEntity.getName(), proxyUgi);
-        provider.authorizeResource("instance", "status", "feed", feedEntity.getName(), proxyUgi);
-    }
-
-    @DataProvider(name = "adminResourceActions")
-    private Object[][] createAdminResourceActions() {
-        return new Object[][] {
-            {"version"},
-            {"stack"},
-            {"config"},
-        };
-    }
-
-    @Test (dataProvider = "adminResourceActions")
-    public void testAuthorizeAdminResourceAdmin(String action) throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("admin", action, null, null, proxyUgi);
-    }
-
-    @Test
-    public void testAuthorizeAdminResourceAdminUserBadGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin-group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("admin", "version", null, null, proxyUgi);
-    }
-
-    @Test
-    public void testAuthorizeAdminResourceAdminGroupBadUser() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty(
-                "falcon.security.authorization.admin.groups", "admin-group");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin-user", realUser, new String[]{"admin-group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("admin", "version", null, null, proxyUgi);
-    }
-
-    @Test (expectedExceptions = AuthorizationException.class)
-    public void testAuthorizeAdminResourceInvalidUserAndGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin-user", realUser, new String[]{"admin-group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("admin", "stack", null, null, proxyUgi);
-        Assert.fail("User does not belong to both admin-users not groups");
-    }
-
-    @DataProvider(name = "entityResourceActions")
-    private Object[][] createEntityResourceActions() {
-        return new Object[][] {
-            {"entities", "list", "feed"},
-            {"entities", "list", "process"},
-            {"entities", "list", "cluster"},
-        };
-    }
-
-    @Test (dataProvider = "entityResourceActions")
-    public void testAuthorizeEntitiesInstancesReadOnlyResource(String resource,
-                                                               String action,
-                                                               String entityType) throws Exception {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin-user", realUser, new String[]{"admin-group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource(resource, action, entityType, null, proxyUgi);
-    }
-
-    @DataProvider(name = "entityLifecycleResourceActions")
-    private Object[][] createEntityLifecycleResourceActions() {
-        return new Object[][] {
-            {"entities", "status", "cluster", "primary-cluster"},
-            {"entities", "status", "process", "sample-process"},
-            {"entities", "status", "feed", "sample-feed"},
-            {"instance", "status", "process", "sample-process"},
-            {"instance", "running", "process", "sample-process"},
-            {"instance", "running", "feed", "sample-feed"},
-        };
-    }
-
-    @Test(dataProvider = "entityLifecycleResourceActions")
-    public void testAuthorizeEntitiesInstancesLifecycleResource(String resource, String action,
-                                                                String entityType,
-                                                                String entityName) throws Exception {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                EntityBuilderTestUtil.USER, realUser, new String[]{EntityBuilderTestUtil.USER, });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource(resource, action, entityType, entityName, proxyUgi);
-    }
-
-    @Test(dataProvider = "entityLifecycleResourceActions",
-            expectedExceptions = AuthorizationException.class)
-    public void testAuthorizeEntitiesInstancesLifecycleResourceBadUGI(String resource,
-                                                                      String action,
-                                                                      String entityType,
-                                                                      String entityName)
-        throws Exception {
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin-user", realUser, new String[]{"admin-group", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource(resource, action, entityType, entityName, proxyUgi);
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class)
-    public void testAuthorizeBadResource() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("invalid", "version", null, null, proxyUgi);
-        Assert.fail("Bad resource");
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class)
-    public void testAuthorizeNullResource() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource(null, "version", null, null, proxyUgi);
-        Assert.fail("Bad resource");
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class)
-    public void testAuthorizeBadAction() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", null, "feedz", null, proxyUgi);
-        Assert.fail("Bad action");
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class)
-    public void testAuthorizeNullEntityType() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "list", null, "primary-cluster", proxyUgi);
-        Assert.fail("Bad entity type");
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class)
-    public void testAuthorizeBadEntityType() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "list", "clusterz", "primary-cluster", proxyUgi);
-        Assert.fail("Bad entity type");
-    }
-
-    @Test
-    public void testAuthorizeValidatePOSTOperations() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        EntityBuilderTestUtil.addProcessACL(processEntity, "admin", "admin");
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeEntity(processEntity.getName(), "process",
-                processEntity.getACL(), "submit", proxyUgi);
-    }
-
-    @Test (expectedExceptions = EntityNotRegisteredException.class)
-    public void testAuthorizeResourceOperationsBadEntity() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "status", "process", feedEntity.getName(), proxyUgi);
-        Assert.fail("Bad entity");
-    }
-
-    @Test
-    public void testAuthorizeValidatePOSTOperationsGroupBadUser() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        EntityBuilderTestUtil.addProcessACL(processEntity, "admin-user", "admin");
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeEntity(processEntity.getName(), "process",
-                processEntity.getACL(), "submit", proxyUgi);
-    }
-
-    @Test (expectedExceptions = AuthorizationException.class)
-    public void testAuthorizeValidatePOSTOperationsBadUserAndGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        EntityBuilderTestUtil.addProcessACL(processEntity, "admin-user", "admin-group");
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeEntity(processEntity.getName(), "process",
-                processEntity.getACL(), "submit", proxyUgi);
-    }
-
-    @Test
-    public void testAuthorizeLineageResource() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.users", "admin");
-        StartupProperties.get().setProperty("falcon.security.authorization.admin.groups", "admin");
-
-        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(
-                "admin", realUser, new String[]{"admin", });
-
-        DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("metadata", "lineage", null, null, proxyUgi);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/security/SecurityUtilTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/security/SecurityUtilTest.java b/common/src/test/java/org/apache/falcon/security/SecurityUtilTest.java
deleted file mode 100644
index d47acbc..0000000
--- a/common/src/test/java/org/apache/falcon/security/SecurityUtilTest.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.service.GroupsService;
-import org.apache.falcon.service.ProxyUserService;
-import org.apache.falcon.service.Services;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.falcon.util.RuntimeProperties;
-import org.mockito.Mockito;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-
-/**
- * Unit tests for SecurityUtil.
- */
-public class SecurityUtilTest {
-
-    private ProxyUserService proxyUserService;
-    private GroupsService groupsService;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        Services.get().register(new ProxyUserService());
-        Services.get().register(new GroupsService());
-        groupsService = Services.get().getService(GroupsService.SERVICE_NAME);
-        proxyUserService = Services.get().getService(ProxyUserService.SERVICE_NAME);
-        groupsService.init();
-
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        proxyUserService.destroy();
-        groupsService.destroy();
-        Services.get().reset();
-    }
-
-    @Test
-    public void testDefaultGetAuthenticationType() throws Exception {
-        Assert.assertEquals(SecurityUtil.getAuthenticationType(), "simple");
-    }
-
-    @Test
-    public void testGetAuthenticationType() throws Exception {
-        try {
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, "kerberos");
-            Assert.assertEquals(SecurityUtil.getAuthenticationType(), "kerberos");
-        } finally {
-            // reset
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, "simple");
-        }
-    }
-
-    @Test
-    public void testIsSecurityEnabledByDefault() throws Exception {
-        Assert.assertFalse(SecurityUtil.isSecurityEnabled());
-    }
-
-    @Test
-    public void testIsSecurityEnabled() throws Exception {
-        try {
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, "kerberos");
-            Assert.assertTrue(SecurityUtil.isSecurityEnabled());
-        } finally {
-            // reset
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, "simple");
-        }
-    }
-
-    @Test
-    public void testIsAuthorizationEnabledByDefault() throws Exception {
-        Assert.assertFalse(SecurityUtil.isAuthorizationEnabled());
-    }
-
-    @Test
-    public void testIsAuthorizationEnabled() throws Exception {
-        try {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-            Assert.assertTrue(SecurityUtil.isAuthorizationEnabled());
-        } finally {
-            // reset
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test
-    public void testGetAuthorizationProviderByDefault() throws Exception {
-        Assert.assertNotNull(SecurityUtil.getAuthorizationProvider());
-        Assert.assertEquals(SecurityUtil.getAuthorizationProvider().getClass(),
-                DefaultAuthorizationProvider.class);
-    }
-
-    @Test
-    public void testTryProxy() throws IOException, FalconException {
-        Process process = Mockito.mock(Process.class);
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        final String currentUser = System.getProperty("user.name");
-
-        // When ACL not specified
-        CurrentUser.authenticate(currentUser);
-        SecurityUtil.tryProxy(process, "");
-        Assert.assertEquals(CurrentUser.getUser(), currentUser);
-
-        ACL acl = new ACL();
-        acl.setOwner(FalconTestUtil.TEST_USER_2);
-        acl.setGroup("users");
-        Mockito.when(process.getACL()).thenReturn(acl);
-
-        // When ACL is specified
-        SecurityUtil.tryProxy(process, "");
-        Assert.assertEquals(CurrentUser.getUser(), FalconTestUtil.TEST_USER_2);
-    }
-
-    @Test (expectedExceptions = FalconException.class,
-           expectedExceptionsMessageRegExp = "doAs user and ACL owner mismatch.*")
-    public void testTryProxyWithDoAsUser() throws IOException, FalconException {
-        Process process = Mockito.mock(Process.class);
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        final String currentUser = "foo";
-
-        ACL acl = new ACL();
-        acl.setOwner(FalconTestUtil.TEST_USER_2);
-        acl.setGroup("users");
-        Mockito.when(process.getACL()).thenReturn(acl);
-
-        CurrentUser.authenticate(currentUser);
-        CurrentUser.proxyDoAsUser("doAsUser", "localhost");
-
-        Assert.assertEquals(CurrentUser.getUser(), "doAsUser");
-        SecurityUtil.tryProxy(process, "doAsUser");
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/service/GroupsServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/service/GroupsServiceTest.java b/common/src/test/java/org/apache/falcon/service/GroupsServiceTest.java
deleted file mode 100644
index be5cbe7..0000000
--- a/common/src/test/java/org/apache/falcon/service/GroupsServiceTest.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-import org.testng.Assert;
-
-import java.util.List;
-
-/**
- * Unit tests for GroupsService.
- */
-public class GroupsServiceTest {
-
-    private GroupsService service;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        service = new GroupsService();
-        service.init();
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        service.destroy();
-    }
-
-    @Test
-    public void testGetName() throws Exception {
-        Assert.assertEquals(service.getName(), GroupsService.SERVICE_NAME);
-    }
-
-    @Test
-    public void testGroupsService() throws Exception {
-        List<String> g = service.getGroups(System.getProperty("user.name"));
-        Assert.assertNotSame(g.size(), 0);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/service/ProxyUserServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/service/ProxyUserServiceTest.java b/common/src/test/java/org/apache/falcon/service/ProxyUserServiceTest.java
deleted file mode 100644
index 83ec6c2..0000000
--- a/common/src/test/java/org/apache/falcon/service/ProxyUserServiceTest.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.service;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.util.RuntimeProperties;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.security.AccessControlException;
-import java.util.List;
-
-/**
- * Unit tests for ProxyUserService.
- */
-public class ProxyUserServiceTest {
-
-    private ProxyUserService proxyUserService;
-    private GroupsService groupsService;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        Services.get().register(new ProxyUserService());
-        Services.get().register(new GroupsService());
-
-        groupsService = Services.get().getService(GroupsService.SERVICE_NAME);
-        proxyUserService = Services.get().getService(ProxyUserService.SERVICE_NAME);
-        groupsService.init();
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        proxyUserService.destroy();
-        groupsService.destroy();
-        Services.get().reset();
-    }
-
-    @Test
-    public void testGetName() throws Exception {
-        proxyUserService.init();
-        Assert.assertEquals(proxyUserService.getName(), ProxyUserService.SERVICE_NAME);
-    }
-
-    @Test (expectedExceptions = FalconException.class, expectedExceptionsMessageRegExp = ".*falcon.service"
-            + ".ProxyUserService.proxyuser.foo.groups property not set in runtime properties.*")
-    public void testWrongConfigGroups() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().remove("falcon.service.ProxyUserService.proxyuser.foo.groups");
-        proxyUserService.init();
-    }
-
-    @Test (expectedExceptions = FalconException.class, expectedExceptionsMessageRegExp = ".*falcon.service"
-            + ".ProxyUserService.proxyuser.foo.hosts property not set in runtime properties.*")
-    public void testWrongConfigHosts() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        RuntimeProperties.get().remove("falcon.service.ProxyUserService.proxyuser.foo.hosts");
-        proxyUserService.init();
-    }
-
-    @Test (expectedExceptions = FalconException.class,
-           expectedExceptionsMessageRegExp = "Exception normalizing host name.*")
-    public void testWrongHost() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "otherhost");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-    }
-
-    @Test
-    public void testValidateAnyHostAnyUser() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-        proxyUserService.validate("foo", "localhost", "bar");
-    }
-
-    @Test (expectedExceptions = AccessControlException.class,
-           expectedExceptionsMessageRegExp = "User .* not defined as proxyuser.*")
-    public void testInvalidProxyUser() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-        proxyUserService.validate("bar", "localhost", "foo");
-    }
-
-    @Test
-    public void testValidateHost() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-        proxyUserService.validate("foo", "localhost", "bar");
-    }
-
-    private String getGroup() throws Exception {
-        List<String> g = groupsService.getGroups(System.getProperty("user.name"));
-        return g.get(0);
-    }
-
-    @Test
-    public void testValidateGroup() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups",
-                    getGroup());
-
-        proxyUserService.init();
-        proxyUserService.validate("foo", "localhost", System.getProperty("user.name"));
-    }
-
-    @Test (expectedExceptions = AccessControlException.class,
-        expectedExceptionsMessageRegExp = "Could not resolve host .*")
-    public void testUnknownHost() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "localhost");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-        proxyUserService.validate("foo", "unknownhost.bar.foo", "bar");
-    }
-
-    @Test (expectedExceptions = AccessControlException.class,
-            expectedExceptionsMessageRegExp = "Unauthorized host .*")
-    public void testInvalidHost() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "localhost");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-        proxyUserService.validate("foo", "www.example.com", "bar");
-    }
-
-    @Test (expectedExceptions = AccessControlException.class,
-           expectedExceptionsMessageRegExp = "Unauthorized proxyuser .*, not in proxyuser groups")
-    public void testInvalidGroup() throws Exception {
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "localhost");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "nobody");
-        proxyUserService.init();
-        proxyUserService.validate("foo", "localhost", System.getProperty("user.name"));
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class,
-           expectedExceptionsMessageRegExp = "proxyUser cannot be null or empty, .*")
-    public void testNullProxyUser() throws Exception {
-        proxyUserService.init();
-        proxyUserService.validate(null, "localhost", "bar");
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class,
-           expectedExceptionsMessageRegExp = "proxyHost cannot be null or empty, .*")
-    public void testNullHost() throws Exception {
-        proxyUserService.init();
-        proxyUserService.validate("foo", null, "bar");
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/update/UpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/update/UpdateHelperTest.java b/common/src/test/java/org/apache/falcon/update/UpdateHelperTest.java
deleted file mode 100644
index 3e48e26..0000000
--- a/common/src/test/java/org/apache/falcon/update/UpdateHelperTest.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.update;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.parser.EntityParserFactory;
-import org.apache.falcon.entity.parser.FeedEntityParser;
-import org.apache.falcon.entity.parser.ProcessEntityParser;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.feed.Partition;
-import org.apache.falcon.entity.v0.feed.Properties;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.process.LateProcess;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Tests for UpdateHelper methods.
- */
-public class UpdateHelperTest extends AbstractTestBase {
-    private final FeedEntityParser parser = (FeedEntityParser)EntityParserFactory.getParser(EntityType.FEED);
-    private final ProcessEntityParser processParser =
-        (ProcessEntityParser)EntityParserFactory.getParser(EntityType.PROCESS);
-
-    @BeforeClass
-    public void init() throws Exception {
-        this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
-        this.conf = dfsCluster.getConf();
-        setup();
-    }
-
-    @AfterClass
-    public void tearDown() {
-        this.dfsCluster.shutdown();
-    }
-
-    @BeforeMethod
-    public void setUp() throws Exception {
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        storeEntity(EntityType.CLUSTER, "backupCluster");
-        storeEntity(EntityType.FEED, "clicksFeed");
-        storeEntity(EntityType.FEED, "impressionFeed");
-        storeEntity(EntityType.FEED, "imp-click-join1");
-        storeEntity(EntityType.FEED, "imp-click-join2");
-    }
-
-    private void prepare(Process process) throws IOException, FalconException {
-        FileSystem fs = dfsCluster.getFileSystem();
-        Cluster clusterEntity = ConfigurationStore.get().get(EntityType.CLUSTER, "testCluster");
-        Path staging = EntityUtil.getNewStagingPath(clusterEntity, process);
-        fs.mkdirs(staging);
-        fs.create(new Path(staging, "workflow.xml")).close();
-        fs.create(new Path(staging, "checksums")).close();
-    }
-
-    @Test
-    public void testIsEntityUpdated() throws Exception {
-        Feed oldFeed = parser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-        String cluster = "testCluster";
-        Feed newFeed = (Feed) oldFeed.copy();
-        Cluster clusterEntity = ConfigurationStore.get().get(EntityType.CLUSTER, cluster);
-
-        Path feedPath = EntityUtil.getNewStagingPath(clusterEntity, oldFeed);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-
-        //Add tags and ensure isEntityUpdated returns false
-        newFeed.setTags("category=test");
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-
-        newFeed.setGroups("newgroups");
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-        newFeed.getLateArrival().setCutOff(Frequency.fromString("hours(8)"));
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-        newFeed.setFrequency(Frequency.fromString("days(1)"));
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-
-        Process oldProcess = processParser.parseAndValidate(this.getClass().getResourceAsStream(PROCESS_XML));
-        prepare(oldProcess);
-        Process newProcess = (Process) oldProcess.copy();
-        Path procPath = EntityUtil.getNewStagingPath(clusterEntity, oldProcess);
-
-        newProcess.getRetry().setPolicy(PolicyType.FINAL);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-        newProcess.getLateProcess().getLateInputs().remove(1);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-        newProcess.getLateProcess().setPolicy(PolicyType.PERIODIC);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-        newProcess.setFrequency(Frequency.fromString("days(1)"));
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-
-        //Adding new cluster shouldn't cause update in the old cluster
-        newProcess = (Process) oldProcess.copy();
-        org.apache.falcon.entity.v0.process.Cluster procCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        procCluster.setName("newcluster");
-        procCluster.setValidity(newProcess.getClusters().getClusters().get(0).getValidity());
-        newProcess.getClusters().getClusters().add(procCluster);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-
-        //change pipelines and ensure it doesn't cause an update
-        oldProcess.setPipelines("test");
-        newProcess.setPipelines("newTest");
-        newProcess.setTags("category=test");
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-
-        //In the case of incomplete update, where new entity is scheduled but still not updated in config store,
-        //another update call shouldn't cause update in workflow engine
-        newProcess.setFrequency(Frequency.fromString("days(1)"));
-        procPath = EntityUtil.getNewStagingPath(clusterEntity, newProcess);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-    }
-
-    @Test
-    public void testShouldUpdateAffectedEntities() throws Exception {
-        Feed oldFeed = parser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-
-        Feed newFeed = (Feed) oldFeed.copy();
-        Process process = processParser.parseAndValidate(this.getClass().getResourceAsStream(PROCESS_XML));
-        prepare(process);
-        String cluster = process.getClusters().getClusters().get(0).getName();
-
-        Assert.assertFalse(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        newFeed.getLateArrival().setCutOff(Frequency.fromString("hours(1)"));
-        Assert.assertFalse(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        newFeed.getLateArrival().setCutOff(oldFeed.getLateArrival().getCutOff());
-        getLocation(newFeed, LocationType.DATA, cluster).setPath("/test");
-        Assert.assertTrue(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        getLocation(newFeed, LocationType.DATA, cluster).setPath(
-                getLocation(oldFeed, LocationType.DATA, cluster).getPath());
-        newFeed.setFrequency(Frequency.fromString("months(1)"));
-        Assert.assertTrue(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        newFeed.setFrequency(oldFeed.getFrequency());
-        Partition partition = new Partition();
-        partition.setName("1");
-        newFeed.getPartitions().getPartitions().add(partition);
-        Assert.assertFalse(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        Property property = new Property();
-        property.setName("1");
-        property.setValue("1");
-        newFeed.setProperties(new Properties());
-        newFeed.getProperties().getProperties().add(property);
-        Assert.assertFalse(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        newFeed.getProperties().getProperties().remove(0);
-        Assert.assertFalse(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        //Change in start time should trigger process update as instance time changes
-        FeedHelper.getCluster(newFeed, process.getClusters().getClusters().get(0).getName()).getValidity().setStart(
-                SchemaHelper.parseDateUTC("2012-11-01T00:00Z"));
-        Assert.assertTrue(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-
-        FeedHelper.getCluster(newFeed, process.getClusters().getClusters().get(0).getName()).getValidity().
-                setStart(FeedHelper.getCluster(oldFeed,
-                        process.getClusters().getClusters().get(0).getName()).getValidity().getStart());
-
-        //Change location to table should trigger process update
-        newFeed.setLocations(null);
-        CatalogTable table = new CatalogTable();
-        table.setUri("catalog:default:clicks-blah#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}");
-        newFeed.setTable(table);
-        Assert.assertFalse(UpdateHelper.shouldUpdate(oldFeed, newFeed, process, cluster));
-    }
-
-    @Test
-    public void testIsEntityUpdatedTable() throws Exception {
-        InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
-        Feed oldTableFeed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(inputStream);
-        getStore().publish(EntityType.FEED, oldTableFeed);
-
-        String cluster = "testCluster";
-        Cluster clusterEntity = ConfigurationStore.get().get(EntityType.CLUSTER, cluster);
-        Path feedPath = EntityUtil.getNewStagingPath(clusterEntity, oldTableFeed);
-        Feed newTableFeed = (Feed) oldTableFeed.copy();
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldTableFeed, newTableFeed, cluster, feedPath));
-
-        newTableFeed.setGroups("newgroups");
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldTableFeed, newTableFeed, cluster, feedPath));
-        newTableFeed.setFrequency(Frequency.fromString("days(1)"));
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldTableFeed, newTableFeed, cluster, feedPath));
-
-        final CatalogTable table = new CatalogTable();
-        table.setUri("catalog:default:clicks-blah#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}");
-        newTableFeed.setTable(table);
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldTableFeed, newTableFeed, cluster, feedPath));
-
-        inputStream = getClass().getResourceAsStream("/config/process/process-table.xml");
-        Process oldProcess = (Process) EntityType.PROCESS.getUnmarshaller().unmarshal(inputStream);
-        FileSystem fs = dfsCluster.getFileSystem();
-        Path staging = EntityUtil.getNewStagingPath(clusterEntity, oldProcess);
-        fs.mkdirs(staging);
-        fs.create(new Path(staging, "workflow.xml")).close();
-        fs.create(new Path(staging, "checksums")).close();
-        Process newProcess = (Process) oldProcess.copy();
-        Path procPath = EntityUtil.getNewStagingPath(clusterEntity, oldProcess);
-
-        newProcess.getRetry().setPolicy(PolicyType.FINAL);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-        newProcess.setFrequency(Frequency.fromString("days(1)"));
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-    }
-
-    @Test
-    public void testIsEntityACLUpdated() throws Exception {
-        Feed oldFeed = parser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-        String cluster = "testCluster";
-        Feed newFeed = (Feed) oldFeed.copy();
-        Cluster clusterEntity = ConfigurationStore.get().get(EntityType.CLUSTER, cluster);
-
-        Path feedPath = EntityUtil.getNewStagingPath(clusterEntity, oldFeed);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-
-        newFeed.getACL().setOwner("new-user");
-        newFeed.getACL().setGroup("new-group");
-        Assert.assertNotEquals(oldFeed.getACL().getOwner(), newFeed.getACL().getOwner());
-        Assert.assertNotEquals(oldFeed.getACL().getGroup(), newFeed.getACL().getGroup());
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldFeed, newFeed, cluster, feedPath));
-
-        Process oldProcess = processParser.parseAndValidate(this.getClass().getResourceAsStream(PROCESS_XML));
-        prepare(oldProcess);
-        Process newProcess = (Process) oldProcess.copy();
-        Path procPath = EntityUtil.getNewStagingPath(clusterEntity, oldProcess);
-
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-        org.apache.falcon.entity.v0.process.ACL processACL =
-                new org.apache.falcon.entity.v0.process.ACL();
-        processACL.setOwner("owner");
-        processACL.setOwner("group");
-        newProcess.setACL(processACL);
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-    }
-
-    @Test
-    public void testIsEntityLateProcessUpdated() throws Exception {
-        String cluster = "testCluster";
-        Cluster clusterEntity = ConfigurationStore.get().get(EntityType.CLUSTER, cluster);
-        Process oldProcess = processParser.parseAndValidate(this.getClass().getResourceAsStream(PROCESS_XML));
-        prepare(oldProcess);
-        Path procPath = EntityUtil.getNewStagingPath(clusterEntity, oldProcess);
-
-        // The Process should not be updated when late processing is updated.
-        // As the definition does not affect the Oozie workflow.
-        Process newProcess = (Process) oldProcess.copy();
-        newProcess.getLateProcess().setPolicy(PolicyType.FINAL);
-        Assert.assertFalse(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-
-        LateProcess lateProcess = newProcess.getLateProcess();
-        newProcess.setLateProcess(null);
-
-        // The Process should be updated when late processing is removed.
-        // Pre-processing needs to be removed from the workflow
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(oldProcess, newProcess, cluster, procPath));
-
-        Process newerProcess = (Process) newProcess.copy();
-        newerProcess.setLateProcess(lateProcess);
-
-        // The Process should be updated when late processing is added.
-        // Pre-processing needs to be added to the workflow
-        Assert.assertTrue(UpdateHelper.isEntityUpdated(newProcess, newerProcess, cluster, procPath));
-    }
-
-    private static Location getLocation(Feed feed, LocationType type, String cluster) {
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster);
-        if (feedCluster.getLocations() != null) {
-            return getLocation(feedCluster.getLocations(), type);
-        }
-        return getLocation(feed.getLocations(), type);
-    }
-
-    private static Location getLocation(Locations locations, LocationType type) {
-        for (Location loc : locations.getLocations()) {
-            if (loc.getType() == type) {
-                return loc;
-            }
-        }
-        Location loc = new Location();
-        loc.setPath("/tmp");
-        loc.setType(type);
-        return loc;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/util/ApplicationPropertiesTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/util/ApplicationPropertiesTest.java b/common/src/test/java/org/apache/falcon/util/ApplicationPropertiesTest.java
deleted file mode 100644
index d899d53..0000000
--- a/common/src/test/java/org/apache/falcon/util/ApplicationPropertiesTest.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import java.io.File;
-import java.io.FileOutputStream;
-
-/**
- * Tests for ApplicationProperties.
- */
-public class ApplicationPropertiesTest {
-
-    @Test
-    public void testConfigLocation() throws Exception {
-        File target = new File("target");
-        if (!target.exists()) {
-            target = new File("common/target");
-        }
-
-        FileOutputStream out = new FileOutputStream(new File(target, "config.properties"));
-        out.write("*.domain=unittest\n".getBytes());
-        out.write("unittest.test=hello world\n".getBytes());
-        out.close();
-        ApplicationProperties configLocation = new ConfigLocation();
-        configLocation.loadProperties("config.properties", target.getAbsolutePath());
-        Assert.assertEquals(configLocation.getDomain(), "unittest");
-        Assert.assertEquals(configLocation.get("test"), "hello world");
-    }
-
-    @Test
-    public void testClassPathLocation() throws Exception {
-        ApplicationProperties classPathLocation = new ClassPathLocation();
-        classPathLocation.loadProperties("classpath.properties", null);
-        Assert.assertEquals(classPathLocation.getDomain(), "unittest");
-        Assert.assertEquals(classPathLocation.get("test"), "hello world");
-    }
-
-    @Test
-    public void testPropertiesWithSpaces() throws Exception{
-        ApplicationProperties properties = new ConfigLocation();
-        properties.put("key1", "value with trailing spaces.  ");
-        properties.put("key2", "  value with leading spaces.");
-        properties.put("key3", "  value with spaces on both ends. ");
-        Assert.assertEquals(properties.getProperty("key1"), "value with trailing spaces.");
-        Assert.assertEquals(properties.getProperty("key2"), "value with leading spaces.");
-        Assert.assertEquals(properties.getProperty("key3"), "value with spaces on both ends.");
-    }
-
-    @Test (expectedExceptions = FalconException.class)
-    public void testMissingLocation() throws FalconException {
-        new MissingLocation().loadProperties();
-    }
-
-    private class ConfigLocation extends ApplicationProperties {
-
-        protected ConfigLocation() throws FalconException {
-        }
-
-        protected void init() {}
-
-        @Override
-        protected String getPropertyFile() {
-            return "config.properties";
-        }
-    }
-
-    private class ClassPathLocation extends ApplicationProperties {
-
-        protected ClassPathLocation() throws FalconException {
-        }
-
-        protected void init() {}
-
-        @Override
-        protected String getPropertyFile() {
-            return "classpath.properties";
-        }
-    }
-
-    private class MissingLocation extends ApplicationProperties {
-
-        protected MissingLocation() throws FalconException {
-        }
-
-        @Override
-        protected String getPropertyFile() {
-            return "missing.properties";
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/util/FalconTestUtil.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/util/FalconTestUtil.java b/common/src/test/java/org/apache/falcon/util/FalconTestUtil.java
deleted file mode 100644
index 2a890ae..0000000
--- a/common/src/test/java/org/apache/falcon/util/FalconTestUtil.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-/**
- * Utilities for falcon unit tests.
- */
-public final class FalconTestUtil {
-
-    public static final String TEST_USER_1 = "falcon-ut-user";
-    public static final String TEST_USER_2 = "testuser-ut-user";
-
-    private FalconTestUtil() {
-        // private constructor to prevent instantiation
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/util/HadoopQueueUtilTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/util/HadoopQueueUtilTest.java b/common/src/test/java/org/apache/falcon/util/HadoopQueueUtilTest.java
deleted file mode 100644
index bb37343..0000000
--- a/common/src/test/java/org/apache/falcon/util/HadoopQueueUtilTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Unit tests for HadoopQueueUtil.
- */
-public final class HadoopQueueUtilTest {
-
-    @Test
-    public void testGetHadoopClusterQueueNamesHelper1() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-schedulerinfo-1.json");
-        BufferedReader br = new BufferedReader(new InputStreamReader(inputStream));
-        String jsonResult = "";
-        String line;
-        while((line = br.readLine()) != null) {
-            jsonResult += line;
-        }
-        Set<String> qNames = new HashSet<>();
-        HadoopQueueUtil.getHadoopClusterQueueNamesHelper(jsonResult, qNames);
-        Assert.assertEquals(qNames.size(), 9);
-    }
-
-    @Test
-    public void testGetHadoopClusterQueueNamesHelper2() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-schedulerinfo-2.json");
-        BufferedReader br = new BufferedReader(new InputStreamReader(inputStream));
-        String jsonResult = "";
-        String line;
-        while((line = br.readLine()) != null) {
-            jsonResult += line;
-        }
-        Set<String> qNames = new HashSet<>();
-        HadoopQueueUtil.getHadoopClusterQueueNamesHelper(jsonResult, qNames);
-        Assert.assertTrue(qNames.contains("default"));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/util/RadixNodeTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/util/RadixNodeTest.java b/common/src/test/java/org/apache/falcon/util/RadixNodeTest.java
deleted file mode 100644
index aea28e6..0000000
--- a/common/src/test/java/org/apache/falcon/util/RadixNodeTest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.testng.Assert;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.Arrays;
-import java.util.HashSet;
-
-/**
- * Tests for Radix Node.
- */
-public class RadixNodeTest {
-    private RadixNode<String> rootNode =  new RadixNode<String>();
-    private RadixNode<String> normalNode = new RadixNode<String>();
-
-    @BeforeMethod
-    public void setUp(){
-        rootNode.setKey("");
-        rootNode.setValues(new HashSet<String>(Arrays.asList("root")));
-
-        normalNode.setKey("/data/cas/");
-        normalNode.setValues(new HashSet<String>(Arrays.asList("CAS Project")));
-
-    }
-
-
-    @Test
-    public void testMatchingWithRoot(){
-        String inputKey = "/data/cas/";
-        Assert.assertEquals(rootNode.getMatchLength(inputKey), 0);
-    }
-
-    @Test
-    public void testEmptyMatchingWithRoot(){
-        String inputKey = "";
-        Assert.assertEquals(rootNode.getMatchLength(inputKey), 0);
-    }
-
-    @Test
-    public void testNullMatchingWithRoot(){
-        Assert.assertEquals(rootNode.getMatchLength(null), 0);
-    }
-
-    @Test
-    public void testDistinctStringMatching(){
-        String inputKey = "data/cas";
-        Assert.assertEquals(normalNode.getMatchLength(inputKey), 0);
-    }
-
-    @Test
-    public void testSameStringMatching(){
-        String inputKey = "/data/cas";
-        Assert.assertEquals(normalNode.getMatchLength(inputKey), 9);
-    }
-
-    @Test
-    public void testNullStringMatching(){
-        Assert.assertEquals(normalNode.getMatchLength(null), 0);
-    }
-
-
-    @Test
-    public void testAddingDuplicateValues() {
-        rootNode.addValue("root");
-        Assert.assertEquals(rootNode.getValues().size(), 1);
-    }
-
-    @Test
-    public void testAddMultipleValues() {
-        normalNode.addValue("data");
-        Assert.assertTrue(normalNode.containsValue("data"));
-        Assert.assertTrue(normalNode.containsValue("CAS Project"));
-    }
-
-    @Test
-    public void testMatchInput() {
-        RadixNode<String> node = new RadixNode<String>();
-
-        FalconRadixUtils.INodeAlgorithm matcher = new FalconRadixUtils.FeedRegexAlgorithm();
-        node.setKey("/data/cas/projects/${YEAR}/${MONTH}/${DAY}");
-        Assert.assertTrue(node.matches("/data/cas/projects/2014/09/09", matcher));
-        Assert.assertFalse(node.matches("/data/cas/projects/20140909", matcher));
-        Assert.assertFalse(node.matches("/data/2014/projects/2014/09/09", matcher));
-        Assert.assertFalse(node.matches("/data/2014/projects/2014/09/", matcher));
-        Assert.assertFalse(node.matches("/data/cas/projects/2014/09/09trail", matcher));
-        Assert.assertFalse(node.matches("/data/cas/projects/2014/09/09/", matcher));
-        Assert.assertFalse(node.matches("/data/cas/projects/2014/09/", matcher));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/util/RadixTreeTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/util/RadixTreeTest.java b/common/src/test/java/org/apache/falcon/util/RadixTreeTest.java
deleted file mode 100644
index e8b0e5b..0000000
--- a/common/src/test/java/org/apache/falcon/util/RadixTreeTest.java
+++ /dev/null
@@ -1,322 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.entity.store.FeedPathStore;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.resource.FeedLookupResult;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.Collection;
-import java.util.List;
-
-/**
- * Tests for Radix Tree.
- */
-public class RadixTreeTest {
-
-    private RadixTree<String> tree;
-    private FalconRadixUtils.INodeAlgorithm regexAlgorithm = new FalconRadixUtils.FeedRegexAlgorithm();
-
-    @BeforeMethod
-    public void setUp() {
-        tree = new RadixTree<String>();
-        tree.insert("key1", "value1");
-        tree.insert("key2", "value2");
-        tree.insert("random", "random");
-    }
-
-    @AfterMethod
-    public void reset() {
-        tree = null;
-    }
-
-    @Test
-    public void testInsertAtRootTest()  {
-        FeedPathStore<String> tree2 = new RadixTree<String>();
-        tree2.insert("/data/cas/projects/dwh/", "dwh");
-        Assert.assertEquals(tree2.find("/data/cas/projects/dwh/").size(), 1);
-        Assert.assertTrue(tree2.find("/data/cas/projects/dwh/").contains("dwh"));
-
-    }
-
-
-    @Test
-    public void testDuplicateKeyInsert() {
-        tree.insert("duplicatekey", "value1");
-        tree.insert("duplicatekey", "value2");
-        Assert.assertEquals(tree.find("duplicatekey").size(), 2);
-        Assert.assertTrue(tree.find("duplicatekey").contains("value1"));
-        Assert.assertTrue(tree.find("duplicatekey").contains("value2"));
-    }
-
-    @Test
-    public void testGetNextCandidate() {
-        tree.insert("/projects/userplatform/${YEAR}-${MONTH}-${DAY}", "feed1");
-        tree.insert("/projects/userplatform/another", "feed2");
-        Collection<String> result = tree.find("/projects/userplatform/another");
-        Assert.assertTrue(result.contains("feed2"));
-
-        result = tree.find("/projects/userplatform/2014-07-07", regexAlgorithm);
-        Assert.assertTrue(result.contains("feed1"));
-    }
-
-    @Test
-    public void testNoOverlap() {
-        tree.insert("water", "No Overlap");
-        Assert.assertEquals(tree.getSize(), 4);
-    }
-
-    @Test
-    public void testInputKeySubset() {
-        tree.insert("rand", "Input Subset");
-        Assert.assertEquals(tree.getSize(), 4);
-
-    }
-
-    @Test
-    public void testInputKeySuperset() {
-        tree.insert("randomiser", "Input Superset");
-        Assert.assertEquals(tree.getSize(), 4);
-    }
-
-
-    @Test
-    public void testInputKeyPathStyle() {
-        tree.insert("/data/cas/projects/", "path");
-        Assert.assertEquals(tree.getSize(), 4);
-        Assert.assertTrue(tree.find("/data/cas/projects/").contains("path"));
-    }
-
-
-    // Tests for find String
-    @Test
-    public void testSubstringPathFind() {
-        tree.insert("/data/cas/projects/rtbd/", "rtbd");
-        tree.insert("/data/cas/projects/dwh/", "dwh");
-        Assert.assertEquals(tree.getSize(), 5);
-        Assert.assertTrue(tree.find("/data/cas/projects/rtbd/").contains("rtbd"));
-        Assert.assertTrue(tree.find("/data/cas/projects/dwh/").contains("dwh"));
-        Assert.assertNull(tree.find("/data/cas/projects/"));
-    }
-
-    @Test
-    public void testStringSplitFind() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-        Assert.assertTrue(tree.find("rand").contains("rand"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-        Assert.assertTrue(tree.find("randomizer").contains("randomizer"));
-
-    }
-
-    //Tests for find using regular expression
-    @Test
-    public void testFindUsingRegex() {
-        tree.insert("/data/cas/${YEAR}/", "rtbd");
-        Assert.assertTrue(tree.find("/data/cas/2014/", regexAlgorithm).contains("rtbd"));
-        Assert.assertNull(tree.find("/data/cas/", regexAlgorithm));
-        Assert.assertNull(tree.find("/data/cas/2014/09", regexAlgorithm));
-        Assert.assertNull(tree.find("/data/cas/${YEAR}/", regexAlgorithm));
-
-        tree.insert("/data/cas/${YEAR}/colo", "local");
-        tree.insert("/data/cas/${YEAR}/colo", "duplicate-local");
-        Assert.assertNull(tree.find("/data/cas/${YEAR}/", regexAlgorithm));
-        Assert.assertNull(tree.find("/data/cas/${YEAR}/colo", regexAlgorithm));
-        Assert.assertNull(tree.find("/data/cas/", regexAlgorithm));
-        Assert.assertTrue(tree.find("/data/cas/2014/", regexAlgorithm).contains("rtbd"));
-        Assert.assertTrue(tree.find("/data/cas/2014/colo", regexAlgorithm).contains("local"));
-        Assert.assertTrue(tree.find("/data/cas/2014/colo", regexAlgorithm).contains("duplicate-local"));
-
-
-    }
-
-    // Tests for delete method
-    @Test
-    public void testDeleteChildOfTerminal() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-        Assert.assertTrue(tree.delete("randomizer", "randomizer"));
-        Assert.assertNull(tree.find("randomizer"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-    }
-
-    @Test
-    public void testMarkingNonTerminal() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-        tree.delete("rand", "rand");
-        Assert.assertNull(tree.find("rand"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-        Assert.assertTrue(tree.find("randomizer").contains("randomizer"));
-    }
-
-    @Test
-    public void testDoubleDelete() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-        Assert.assertTrue(tree.delete("rand", "rand"));
-        Assert.assertFalse(tree.delete("rand", "rand"));
-        Assert.assertNull(tree.find("rand"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-        Assert.assertTrue(tree.find("randomizer").contains("randomizer"));
-    }
-
-    @Test
-    public void testChildCompactionDelete() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-        Assert.assertTrue(tree.delete("random", "random"));
-        Assert.assertNull(tree.find("random"));
-        Assert.assertTrue(tree.find("rand").contains("rand"));
-        Assert.assertTrue(tree.find("randomizer").contains("randomizer"));
-        Assert.assertEquals(tree.getSize(), 4);
-    }
-
-    @Test
-    public void testParentCompactionDelete() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-        Assert.assertTrue(tree.delete("randomizer", "randomizer"));
-        Assert.assertNull(tree.find("randomizer"));
-        Assert.assertTrue(tree.find("rand").contains("rand"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-        Assert.assertEquals(tree.getSize(), 4);
-
-    }
-
-    @Test
-    public void testSequencesOfDelete() {
-        tree.insert("rand", "rand");
-        tree.insert("randomizer", "randomizer");
-
-        Assert.assertTrue(tree.delete("randomizer", "randomizer"));
-        Assert.assertNull(tree.find("randomizer"));
-        Assert.assertTrue(tree.find("rand").contains("rand"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-        Assert.assertEquals(tree.getSize(), 4);
-
-        Assert.assertTrue(tree.delete("rand", "rand"));
-        Assert.assertNull(tree.find("rand"));
-        Assert.assertTrue(tree.find("random").contains("random"));
-        Assert.assertEquals(tree.getSize(), 3);
-
-        Assert.assertTrue(tree.delete("random", "random"));
-        Assert.assertNull(tree.find("random"));
-        Assert.assertEquals(tree.getSize(), 2);
-
-    }
-
-    @Test
-    public void testRootNotCompactedInDelete() {
-        Assert.assertTrue(tree.delete("random", "random"));
-        Assert.assertTrue(tree.delete("key2", "value2"));
-        tree.insert("water", "water");
-        Assert.assertTrue(tree.find("water").contains("water"));
-    }
-
-    @Test
-    public void testDeleteFromListAndChildren() {
-        //check that a delete of a key with multiple values and children is handled
-        tree.insert("keyWithManyValuesAndChild", "value1");
-        tree.insert("keyWithManyValuesAndChild", "value2");
-        tree.insert("keyWithManyValuesAndChildren", "childValue");
-        Assert.assertTrue(tree.delete("keyWithManyValuesAndChild", "value1"));
-    }
-
-    @Test
-    public void testDeleteNonExistent() {
-        Assert.assertFalse(tree.delete("zzz", "zzz"));
-    }
-
-    @Test
-    public void testDeleteSubstring() {
-        Assert.assertFalse(tree.delete("ke", "ke"));
-    }
-
-    @Test
-    public void testDeleteNonTerminal() {
-        Assert.assertFalse(tree.delete("key", "key"));
-    }
-
-
-    @Test
-    public void testDeleteBlankOrEmptyOrNullString() {
-        Assert.assertFalse(tree.delete("", ""));
-        Assert.assertFalse(tree.delete(" ", " "));
-        Assert.assertFalse(tree.delete(null, null));
-    }
-
-    @Test
-    public void testAllSuffixForFirstLevelKey() {
-        tree.insert("key123", "Key was key123");
-        tree.insert("key124", "Key was key124");
-        List<String> result = tree.findSuffixChildren("key", 2);
-        Assert.assertEquals(result.size(), 2);
-        Assert.assertTrue(result.contains("1"));
-        Assert.assertTrue(result.contains("2"));
-    }
-
-    @Test
-    public void testAllSuffixForNestedLevelKey() {
-        tree.insert("key123", "Key was key123");
-        tree.insert("key124", "Key was key124");
-        Assert.assertEquals(tree.findSuffixChildren("key1", 2).size(), 1);
-        Assert.assertEquals(tree.findSuffixChildren("key1", 2).get(0), "2");
-    }
-
-    @Test
-    public void testFeedPropertiesEquals() {
-        FeedLookupResult.FeedProperties f1 = new FeedLookupResult.FeedProperties("feed",
-                LocationType.DATA, "cluster");
-        FeedLookupResult.FeedProperties f1Copy = new FeedLookupResult.FeedProperties("feed",
-                LocationType.DATA, "cluster");
-        FeedLookupResult.FeedProperties f3 = new FeedLookupResult.FeedProperties("anotherFeed",
-                LocationType.DATA, "cluster");
-        FeedLookupResult.FeedProperties f4 = new FeedLookupResult.FeedProperties("feed",
-                LocationType.STATS, "cluster");
-        FeedLookupResult.FeedProperties f5 = new FeedLookupResult.FeedProperties("feed",
-                LocationType.DATA, "anotherCluster");
-
-        Assert.assertTrue(f1.equals(f1Copy));
-        Assert.assertFalse(f1.equals(f3));
-        Assert.assertFalse(f1.equals(f4));
-        Assert.assertFalse(f1.equals(f5));
-
-    }
-
-    @Test
-    public void testMultipleValues() {
-        tree.insert("keyWithMultipleValues", "value1");
-        tree.insert("keyWithMultipleValues", "value2");
-        Assert.assertEquals(tree.find("keyWithMultipleValues").size(), 2);
-        Assert.assertTrue(tree.find("keyWithMultipleValues").contains("value1"));
-        Assert.assertTrue(tree.find("keyWithMultipleValues").contains("value2"));
-
-        tree.delete("keyWithMultipleValues", "value1");
-        Assert.assertTrue(tree.find("keyWithMultipleValues").contains("value2"));
-        Assert.assertFalse(tree.find("keyWithMultipleValues").contains("value1"));
-
-        tree.delete("keyWithMultipleValues", "value2");
-        Assert.assertNull(tree.find("keyWithMultipleValues"));
-    }
-}
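
Note: the removed tests above pin down the contract of the feed-path radix tree: plain find() matches
keys literally, find(key, regexAlgorithm) lets template keys such as /data/cas/${YEAR}/ match concrete
data paths, and delete(key, value) drops a single value while leaving sibling keys intact. A minimal
usage sketch, limited to calls that appear in the tests (the tree and regexAlgorithm objects are assumed
to come from the test fixture, which lies outside this hunk):

    tree.insert("/data/cas/${YEAR}/", "rtbd");         // template key -> value
    tree.find("/data/cas/2014/", regexAlgorithm);      // pattern-aware lookup, contains "rtbd"
    tree.find("/data/cas/${YEAR}/", regexAlgorithm);   // null: the literal template is not a data path
    tree.find("/data/cas/2014/09", regexAlgorithm);    // null: no terminal node at this depth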

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/util/ReflectionUtilsTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/util/ReflectionUtilsTest.java b/common/src/test/java/org/apache/falcon/util/ReflectionUtilsTest.java
deleted file mode 100644
index bc0bce0..0000000
--- a/common/src/test/java/org/apache/falcon/util/ReflectionUtilsTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.parser.ClusterEntityParser;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-/**
- * Tests ReflectionUtils.
- */
-@Test
-public class ReflectionUtilsTest {
-    public void testGetInstance() throws FalconException {
-        //with 1 arg constructor, arg null
-        Object e = ReflectionUtils.getInstanceByClassName("org.apache.falcon.FalconException", Throwable.class, null);
-        Assert.assertTrue(e instanceof  FalconException);
-
-        //with 1 arg constructor, arg not null
-        e = ReflectionUtils.getInstanceByClassName("org.apache.falcon.FalconException", Throwable.class,
-            new Throwable());
-        Assert.assertTrue(e instanceof  FalconException);
-
-        //no constructor, using get() method
-        e = ReflectionUtils.getInstanceByClassName("org.apache.falcon.util.StartupProperties");
-        Assert.assertTrue(e instanceof  StartupProperties);
-
-        //with empty constructor
-        e = ReflectionUtils.getInstanceByClassName("org.apache.falcon.entity.parser.ClusterEntityParser");
-        Assert.assertTrue(e instanceof ClusterEntityParser);
-    }
-}
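
Note: the removed test above documents ReflectionUtils.getInstanceByClassName for three cases: a
one-argument constructor (the argument may be null), a class exposed through a static get() accessor
such as StartupProperties, and a plain no-argument constructor. A condensed sketch of the same calls
(the order in which get() and the no-argument constructor are tried is inferred from the test, not
stated in this diff):

    Object ex = ReflectionUtils.getInstanceByClassName(
            "org.apache.falcon.FalconException", Throwable.class, new Throwable());   // 1-arg constructor
    Object props = ReflectionUtils.getInstanceByClassName(
            "org.apache.falcon.util.StartupProperties");                              // via static get()
    Object parser = ReflectionUtils.getInstanceByClassName(
            "org.apache.falcon.entity.parser.ClusterEntityParser");                   // no-arg constructor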


[41/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/FeedHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/FeedHelper.java b/common/src/main/java/org/apache/falcon/entity/FeedHelper.java
deleted file mode 100644
index cca2d8b..0000000
--- a/common/src/main/java/org/apache/falcon/entity/FeedHelper.java
+++ /dev/null
@@ -1,1292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.LifeCycle;
-import org.apache.falcon.Tag;
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.datasource.DatasourceType;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.ExtractMethod;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.FieldIncludeExclude;
-import org.apache.falcon.entity.v0.feed.Lifecycle;
-import org.apache.falcon.entity.v0.feed.Load;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.feed.MergeType;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.feed.RetentionStage;
-import org.apache.falcon.entity.v0.feed.Sla;
-import org.apache.falcon.entity.v0.feed.Validity;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.lifecycle.FeedLifecycleStage;
-import org.apache.falcon.resource.APIResult;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.falcon.resource.SchedulableEntityInstance;
-import org.apache.falcon.util.BuildProperties;
-import org.apache.falcon.util.DateUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.regex.Matcher;
-
-/**
- * Feed entity helper methods.
- */
-public final class FeedHelper {
-
-    private static final Logger LOG = LoggerFactory.getLogger(FeedHelper.class);
-    private static final int ONE_MS = 1;
-
-    public static final String FORMAT = "yyyyMMddHHmm";
-
-    private FeedHelper() {}
-
-    public static Cluster getCluster(Feed feed, String clusterName) {
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            if (cluster.getName().equals(clusterName)) {
-                return cluster;
-            }
-        }
-        return null;
-    }
-
-    public static Storage createStorage(Feed feed) throws FalconException {
-
-        final Locations feedLocations = feed.getLocations();
-        if (feedLocations != null
-                && feedLocations.getLocations().size() != 0) {
-            return new FileSystemStorage(feed);
-        }
-
-        try {
-            final CatalogTable table = feed.getTable();
-            if (table != null) {
-                return new CatalogStorage(feed);
-            }
-        } catch (URISyntaxException e) {
-            throw new FalconException(e);
-        }
-
-        throw new FalconException("Both catalog and locations are not defined.");
-    }
-
-    public static Storage createStorage(org.apache.falcon.entity.v0.cluster.Cluster clusterEntity,
-                                        Feed feed) throws FalconException {
-        return createStorage(getCluster(feed, clusterEntity.getName()), feed, clusterEntity);
-    }
-
-    public static Storage createStorage(String clusterName, Feed feed)
-        throws FalconException {
-
-        return createStorage(getCluster(feed, clusterName), feed);
-    }
-
-    public static Storage createStorage(Cluster cluster, Feed feed)
-        throws FalconException {
-
-        final org.apache.falcon.entity.v0.cluster.Cluster clusterEntity =
-                EntityUtil.getEntity(EntityType.CLUSTER, cluster.getName());
-
-        return createStorage(cluster, feed, clusterEntity);
-    }
-
-    public static Storage createStorage(Cluster cluster, Feed feed,
-                                        org.apache.falcon.entity.v0.cluster.Cluster clusterEntity)
-        throws FalconException {
-
-        final List<Location> locations = getLocations(cluster, feed);
-        if (locations != null) {
-            return new FileSystemStorage(ClusterHelper.getStorageUrl(clusterEntity), locations);
-        }
-
-        try {
-            final CatalogTable table = getTable(cluster, feed);
-            if (table != null) {
-                return new CatalogStorage(clusterEntity, table);
-            }
-        } catch (URISyntaxException e) {
-            throw new FalconException(e);
-        }
-
-        throw new FalconException("Both catalog and locations are not defined.");
-    }
-
-    /**
-     * Factory method to dole out a storage instance used for replication source.
-     *
-     * @param clusterEntity cluster entity
-     * @param feed feed entity
-     * @return an implementation of Storage
-     * @throws FalconException
-     */
-    public static Storage createReadOnlyStorage(org.apache.falcon.entity.v0.cluster.Cluster clusterEntity,
-                                                Feed feed) throws FalconException {
-        Cluster feedCluster = getCluster(feed, clusterEntity.getName());
-        final List<Location> locations = getLocations(feedCluster, feed);
-        if (locations != null) {
-            return new FileSystemStorage(ClusterHelper.getReadOnlyStorageUrl(clusterEntity), locations);
-        }
-
-        try {
-            final CatalogTable table = getTable(feedCluster, feed);
-            if (table != null) {
-                return new CatalogStorage(clusterEntity, table);
-            }
-        } catch (URISyntaxException e) {
-            throw new FalconException(e);
-        }
-
-        throw new FalconException("Both catalog and locations are not defined.");
-    }
-
-    public static Storage createStorage(String type, String storageUriTemplate)
-        throws URISyntaxException {
-
-        Storage.TYPE storageType = Storage.TYPE.valueOf(type);
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            return new FileSystemStorage(storageUriTemplate);
-        } else if (storageType == Storage.TYPE.TABLE) {
-            return new CatalogStorage(storageUriTemplate);
-        }
-
-        throw new IllegalArgumentException("Bad type: " + type);
-    }
-
-    public static Storage createStorage(String type, String storageUriTemplate,
-                                        Configuration conf) throws URISyntaxException {
-        Storage.TYPE storageType = Storage.TYPE.valueOf(type);
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            return new FileSystemStorage(storageUriTemplate);
-        } else if (storageType == Storage.TYPE.TABLE) {
-            return new CatalogStorage(storageUriTemplate, conf);
-        }
-
-        throw new IllegalArgumentException("Bad type: " + type);
-    }
-
-    public static Storage.TYPE getStorageType(Feed feed) throws FalconException {
-        final Locations feedLocations = feed.getLocations();
-        if (feedLocations != null
-                && feedLocations.getLocations().size() != 0) {
-            return Storage.TYPE.FILESYSTEM;
-        }
-
-        final CatalogTable table = feed.getTable();
-        if (table != null) {
-            return Storage.TYPE.TABLE;
-        }
-
-        throw new FalconException("Both catalog and locations are not defined.");
-    }
-
-    public static Storage.TYPE getStorageType(Feed feed,
-                                              Cluster cluster) throws FalconException {
-        final List<Location> locations = getLocations(cluster, feed);
-        if (locations != null) {
-            return Storage.TYPE.FILESYSTEM;
-        }
-
-        final CatalogTable table = getTable(cluster, feed);
-        if (table != null) {
-            return Storage.TYPE.TABLE;
-        }
-
-        throw new FalconException("Both catalog and locations are not defined.");
-    }
-
-    public static Storage.TYPE getStorageType(Feed feed,
-                                              org.apache.falcon.entity.v0.cluster.Cluster clusterEntity)
-        throws FalconException {
-        Cluster feedCluster = getCluster(feed, clusterEntity.getName());
-        return getStorageType(feed, feedCluster);
-    }
-
-    public static List<Location> getLocations(Cluster cluster, Feed feed) {
-        // check if locations are overridden in cluster
-        final Locations clusterLocations = cluster.getLocations();
-        if (clusterLocations != null
-                && clusterLocations.getLocations().size() != 0) {
-            return clusterLocations.getLocations();
-        }
-
-        Locations feedLocations = feed.getLocations();
-        return feedLocations == null ? null : feedLocations.getLocations();
-    }
-
-    public static Location getLocation(Feed feed, org.apache.falcon.entity.v0.cluster.Cluster cluster,
-                                       LocationType type) {
-        List<Location> locations = getLocations(getCluster(feed, cluster.getName()), feed);
-        if (locations != null) {
-            for (Location location : locations) {
-                if (location.getType() == type) {
-                    return location;
-                }
-            }
-        }
-
-        return null;
-    }
-
-    public static Sla getSLA(Cluster cluster, Feed feed) {
-        final Sla clusterSla = cluster.getSla();
-        if (clusterSla != null) {
-            return clusterSla;
-        }
-        return feed.getSla();
-    }
-
-    public static Sla getSLA(String clusterName, Feed feed) {
-        Cluster cluster = FeedHelper.getCluster(feed, clusterName);
-        return cluster != null ? getSLA(cluster, feed) : null;
-    }
-
-    protected static CatalogTable getTable(Cluster cluster, Feed feed) {
-        // check if table is overridden in cluster
-        if (cluster.getTable() != null) {
-            return cluster.getTable();
-        }
-
-        return feed.getTable();
-    }
-
-    public static String normalizePartitionExpression(String part1, String part2) {
-        String partExp = StringUtils.stripToEmpty(part1) + "/" + StringUtils.stripToEmpty(part2);
-        partExp = partExp.replaceAll("//+", "/");
-        partExp = StringUtils.stripStart(partExp, "/");
-        partExp = StringUtils.stripEnd(partExp, "/");
-        return partExp;
-    }
-
-    public static String normalizePartitionExpression(String partition) {
-        return normalizePartitionExpression(partition, null);
-    }
-
-    public static Properties getClusterProperties(org.apache.falcon.entity.v0.cluster.Cluster cluster) {
-        Properties properties = new Properties();
-        Map<String, String> clusterVars = new HashMap<>();
-        clusterVars.put("colo", cluster.getColo());
-        clusterVars.put("name", cluster.getName());
-        if (cluster.getProperties() != null) {
-            for (org.apache.falcon.entity.v0.cluster.Property property : cluster.getProperties().getProperties()) {
-                clusterVars.put(property.getName(), property.getValue());
-            }
-        }
-        properties.put("cluster", clusterVars);
-        return properties;
-    }
-
-    public static String evaluateClusterExp(org.apache.falcon.entity.v0.cluster.Cluster clusterEntity, String exp)
-        throws FalconException {
-
-        Properties properties = getClusterProperties(clusterEntity);
-        ExpressionHelper expHelp = ExpressionHelper.get();
-        expHelp.setPropertiesForVariable(properties);
-        return expHelp.evaluateFullExpression(exp, String.class);
-    }
-
-    public static String getStagingPath(boolean isSource,
-                                        org.apache.falcon.entity.v0.cluster.Cluster clusterEntity,
-                                        Feed feed, CatalogStorage storage, Tag tag, String suffix) {
-        String stagingDirPath = getStagingDir(isSource, clusterEntity, feed, storage, tag);
-
-        String datedPartitionKey = storage.getDatedPartitionKeys().get(0);
-        String datedPartitionKeySuffix = datedPartitionKey + "=${coord:dataOutPartitionValue('output',"
-                + "'" + datedPartitionKey + "')}";
-        return stagingDirPath + "/"
-                + datedPartitionKeySuffix + "/"
-                + suffix + "/"
-                + "data";
-    }
-
-    public static String getStagingDir(boolean isSource,
-                                       org.apache.falcon.entity.v0.cluster.Cluster clusterEntity,
-                                       Feed feed, CatalogStorage storage, Tag tag) {
-        String workflowName = EntityUtil.getWorkflowName(
-                tag, Arrays.asList(clusterEntity.getName()), feed).toString();
-
-        // log path is created at scheduling wf
-        final String storageUri = isSource
-                ? ClusterHelper.getReadOnlyStorageUrl(clusterEntity) // read interface
-                : ClusterHelper.getStorageUrl(clusterEntity);        // write interface
-        return storageUri
-                + EntityUtil.getLogPath(clusterEntity, feed) + "/"
-                + workflowName + "/"
-                + storage.getDatabase() + "/"
-                + storage.getTable();
-    }
-
-    public static Properties getUserWorkflowProperties(LifeCycle lifeCycle) {
-        Properties props = new Properties();
-        props.put("userWorkflowName", lifeCycle.name().toLowerCase() + "-policy");
-        props.put("userWorkflowEngine", "falcon");
-
-        String version;
-        try {
-            version = BuildProperties.get().getProperty("build.version");
-        } catch (Exception e) {  // unfortunate that this is only available in prism/webapp
-            version = "0.6";
-        }
-        props.put("userWorkflowVersion", version);
-        return props;
-    }
-
-    public static Properties getFeedProperties(Feed feed) {
-        Properties feedProperties = new Properties();
-        if (feed.getProperties() != null) {
-            for (org.apache.falcon.entity.v0.feed.Property property : feed.getProperties().getProperties()) {
-                feedProperties.put(property.getName(), property.getValue());
-            }
-        }
-        return feedProperties;
-    }
-
-    public static Lifecycle getLifecycle(Feed feed, String clusterName) throws FalconException {
-        Cluster cluster = getCluster(feed, clusterName);
-        if (cluster != null) {
-            return cluster.getLifecycle() != null ? cluster.getLifecycle() : feed.getLifecycle();
-        }
-        throw new FalconException("Cluster: " + clusterName + " isn't valid for feed: " + feed.getName());
-    }
-
-    public static RetentionStage getRetentionStage(Feed feed, String clusterName) throws FalconException {
-        if (isLifecycleEnabled(feed, clusterName)) {
-            Lifecycle globalLifecycle = feed.getLifecycle();
-            Lifecycle clusterLifecycle = getCluster(feed, clusterName).getLifecycle();
-
-            if (clusterLifecycle != null && clusterLifecycle.getRetentionStage() != null) {
-                return clusterLifecycle.getRetentionStage();
-            } else if (globalLifecycle != null) {
-                return globalLifecycle.getRetentionStage();
-            }
-        }
-        return null;
-    }
-
-    public static Date getFeedValidityStart(Feed feed, String clusterName) throws FalconException {
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, clusterName);
-        if (feedCluster != null) {
-            return feedCluster.getValidity().getStart();
-        } else {
-            throw new FalconException("No matching cluster " + clusterName
-                    + "found for feed " + feed.getName());
-        }
-    }
-
-    public static Date getNextFeedInstanceDate(Date alignedDate, Feed feed) {
-        Calendar calendar = Calendar.getInstance();
-        calendar.setTime(alignedDate);
-        calendar.add(feed.getFrequency().getTimeUnit().getCalendarUnit(),
-                feed.getFrequency().getFrequencyAsInt());
-        return calendar.getTime();
-    }
-
-    /**
-     * Returns various policies applicable for a feed.
-     *
-     * @param feed feed entity
-     * @param clusterName name of the cluster
-     * @return list of names of lifecycle policies for the given feed, empty list if there are none.
-     */
-    public static List<String> getPolicies(Feed feed, String clusterName) throws FalconException {
-        List<String> result = new ArrayList<>();
-        Cluster cluster = getCluster(feed, clusterName);
-        if (cluster != null) {
-            if (isLifecycleEnabled(feed, clusterName)) {
-                String policy = getRetentionStage(feed, clusterName).getPolicy();
-                policy = StringUtils.isBlank(policy)
-                        ? FeedLifecycleStage.RETENTION.getDefaultPolicyName() : policy;
-                result.add(policy);
-            }
-            return result;
-        }
-        throw new FalconException("Cluster: " + clusterName + " isn't valid for feed: " + feed.getName());
-    }
-
-    /**
-     *  Extracts date from the actual data path e.g., /path/2014/05/06 maps to 2014-05-06T00:00Z.
-     * @param instancePath - actual data path
-     * @param templatePath - template path from feed definition
-     * @param timeZone timeZone
-     * @return date corresponding to the path
-     */
-    //consider just the first occurrence of the pattern
-    public static Date getDate(String templatePath, Path instancePath, TimeZone timeZone) {
-        String path = instancePath.toString();
-        Matcher matcher = FeedDataPath.PATTERN.matcher(templatePath);
-        Calendar cal = Calendar.getInstance(timeZone);
-        int lastEnd = 0;
-
-        Set<FeedDataPath.VARS> matchedVars = new HashSet<>();
-        while (matcher.find()) {
-            FeedDataPath.VARS pathVar = FeedDataPath.VARS.from(matcher.group());
-            String pad = templatePath.substring(lastEnd, matcher.start());
-            if (!path.startsWith(pad)) {
-                //Template and path do not match
-                return null;
-            }
-
-            int value;
-            try {
-                value = Integer.parseInt(path.substring(pad.length(), pad.length() + pathVar.getValueSize()));
-            } catch (NumberFormatException e) {
-                //Not a valid number for variable
-                return null;
-            }
-
-            pathVar.setCalendar(cal, value);
-            lastEnd = matcher.end();
-            path = path.substring(pad.length() + pathVar.getValueSize());
-            matchedVars.add(pathVar);
-        }
-
-        String remTemplatePath = templatePath.substring(lastEnd);
-        //Match the remaining constant at the end
-        //Handling case where feed instancePath has partitions
-        if (StringUtils.isNotEmpty(path) && StringUtils.isNotEmpty(remTemplatePath)
-                && !path.contains(remTemplatePath)) {
-            return null;
-        }
-
-
-        //Reset other fields
-        for (FeedDataPath.VARS var : FeedDataPath.VARS.values()) {
-            if (!matchedVars.contains(var)) {
-                switch (var.getCalendarField()) {
-                case Calendar.DAY_OF_MONTH:
-                    cal.set(var.getCalendarField(), 1);
-                    break;
-                default:
-                    cal.set(var.getCalendarField(), 0);
-                }
-            }
-        }
-        cal.set(Calendar.SECOND, 0);
-        cal.set(Calendar.MILLISECOND, 0);
-        return cal.getTime();
-    }
-
-    public static Path getFeedBasePath(String feedPath) throws IOException {
-        Matcher matcher = FeedDataPath.PATTERN.matcher(feedPath);
-        if (matcher.find()) {
-            return new Path(feedPath.substring(0, matcher.start()));
-        } else {
-            throw new IOException("Unable to resolve pattern for feedPath: " + feedPath);
-        }
-
-    }
-
-    private static void validateFeedInstance(Feed feed, Date instanceTime,
-                                             org.apache.falcon.entity.v0.cluster.Cluster cluster) {
-
-        // validate the cluster
-        Cluster feedCluster = getCluster(feed, cluster.getName());
-        if (feedCluster == null) {
-            throw new IllegalArgumentException("Cluster :" + cluster.getName() + " is not a valid cluster for feed:"
-                    + feed.getName());
-        }
-
-        // validate that instanceTime is in validity range
-        if (feedCluster.getValidity().getStart().after(instanceTime)
-                || !feedCluster.getValidity().getEnd().after(instanceTime)) {
-            throw new IllegalArgumentException("instanceTime: " + instanceTime + " is not in validity range for"
-                    + " Feed: " + feed.getName() + " on cluster:" + cluster.getName());
-        }
-
-        // validate instanceTime on basis of startTime and frequency
-        Date nextInstance = EntityUtil.getNextStartTime(feedCluster.getValidity().getStart(), feed.getFrequency(),
-                feed.getTimezone(), instanceTime);
-        if (!nextInstance.equals(instanceTime)) {
-            throw new IllegalArgumentException("instanceTime: " + instanceTime + " is not a valid instance for the "
-                    + " feed: " + feed.getName() + " on cluster: " + cluster.getName()
-                    + " on the basis of startDate and frequency");
-        }
-    }
-
-    /**
-     * Given a feed instance, finds the generating process instance.
-     *
-     * [process, cluster, instanceTime]
-     *
-     * If the feed is replicated, then it returns null.
-     *
-     * @param feed output feed
-     * @param feedInstanceTime instance time of the feed
-     * @param cluster cluster on which the producer instance is to be found
-     * @return the instance of the process which produces the given feed
-     */
-    public static SchedulableEntityInstance getProducerInstance(Feed feed, Date feedInstanceTime,
-        org.apache.falcon.entity.v0.cluster.Cluster cluster) throws FalconException {
-
-        //validate the inputs
-        validateFeedInstance(feed, feedInstanceTime, cluster);
-        Process process = getProducerProcess(feed);
-        if (process != null) {
-            org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(process,
-                    cluster.getName());
-            Date pStart = processCluster.getValidity().getStart();
-            Date pEnd = processCluster.getValidity().getEnd();
-            Frequency pFrequency = process.getFrequency();
-            TimeZone pTz = process.getTimezone();
-
-            try {
-                Date processInstanceTime = getProducerInstanceTime(feed, feedInstanceTime, process, cluster);
-                boolean isValid = EntityUtil.isValidInstanceTime(pStart, pFrequency, pTz, processInstanceTime);
-                if (processInstanceTime.before(pStart) || !processInstanceTime.before(pEnd) || !isValid){
-                    return null;
-                }
-
-                SchedulableEntityInstance producer = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                        processInstanceTime, EntityType.PROCESS);
-                producer.setTags(SchedulableEntityInstance.OUTPUT);
-                return producer;
-            } catch (FalconException | IllegalArgumentException e) {
-                LOG.error("Error in trying to get producer process: {}'s instance time for feed: {}'s instance: } "
-                        + " on cluster:{}", process.getName(), feed.getName(), feedInstanceTime, cluster.getName());
-            }
-        }
-        return null;
-    }
-
-    /**
-     * Given a feed, find its generating process.
-     *
-     * If no generating process is found it returns null.
-     * @param feed output feed
-     * @return Process which produces the given feed.
-     */
-    public static Process getProducerProcess(Feed feed) throws FalconException {
-
-        EntityList dependencies = EntityUtil.getEntityDependencies(feed);
-
-        for (EntityList.EntityElement e : dependencies.getElements()) {
-            if (e.tag.contains(EntityList.OUTPUT_TAG)) {
-                return EntityUtil.getEntity(EntityType.PROCESS, e.name);
-            }
-        }
-        return null;
-    }
-
-    /**
-     * Find the producerInstance which will generate the given feedInstance.
-     *
-     * @param feed output feed
-     * @param feedInstanceTime instance time of the output feed
-     * @param producer producer process
-     * @return time of the producer instance which will produce the given feed instance.
-     */
-    private static Date getProducerInstanceTime(Feed feed, Date feedInstanceTime, Process producer,
-                                       org.apache.falcon.entity.v0.cluster.Cluster cluster) throws FalconException {
-
-        String clusterName = cluster.getName();
-        Cluster feedCluster = getCluster(feed, clusterName);
-        org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(producer, clusterName);
-        Date producerStartDate = processCluster.getValidity().getStart();
-
-        // read the process definition and find the relative time difference between process and output feed
-        // if output process instance time is now then output FeedInstance time is x
-        String outputInstance = null;
-        for (Output op : producer.getOutputs().getOutputs()) {
-            if (StringUtils.equals(feed.getName(), op.getFeed())) {
-                outputInstance = op.getInstance();
-            }
-        }
-
-        ExpressionHelper.setReferenceDate(producerStartDate);
-        ExpressionHelper evaluator = ExpressionHelper.get();
-        // producerInstance = feedInstanceTime + (difference between producer process and feed)
-        // the feedInstance before or equal to this time is the required one
-        Date relativeFeedInstance = evaluator.evaluate(outputInstance, Date.class);
-        Date feedInstanceActual = EntityUtil.getPreviousInstanceTime(feedCluster.getValidity().getStart(),
-                feed.getFrequency(), feed.getTimezone(), relativeFeedInstance);
-        Long producerInstanceTime = feedInstanceTime.getTime() + (producerStartDate.getTime()
-                - feedInstanceActual.getTime());
-        Date producerInstance = new Date(producerInstanceTime);
-
-        //validate that the producerInstance is in the validity range on the provided cluster
-        if (producerInstance.before(processCluster.getValidity().getStart())
-                || producerInstance.after(processCluster.getValidity().getEnd())) {
-            throw new IllegalArgumentException("Instance time provided: " + feedInstanceTime
-                    + " for feed " + feed.getName()
-                    + " is outside the range of instances produced by the producer process: " + producer.getName()
-                    + " in it's validity range on provided cluster: " + cluster.getName());
-        }
-        return producerInstance;
-    }
-
-
-    public static Set<SchedulableEntityInstance> getConsumerInstances(Feed feed, Date feedInstanceTime,
-                  org.apache.falcon.entity.v0.cluster.Cluster cluster) throws FalconException {
-
-        Set<SchedulableEntityInstance> result = new HashSet<>();
-        // validate that the feed has this cluster & validate that the instanceTime is a valid instanceTime
-        validateFeedInstance(feed, feedInstanceTime, cluster);
-
-        Set<Process> consumers = getConsumerProcesses(feed);
-        for (Process p : consumers) {
-            Set<Date> consumerInstanceTimes = getConsumerProcessInstanceTimes(feed, feedInstanceTime, p, cluster);
-            for (Date date : consumerInstanceTimes) {
-                SchedulableEntityInstance in = new SchedulableEntityInstance(p.getName(), cluster.getName(), date,
-                        EntityType.PROCESS);
-                in.setTags(SchedulableEntityInstance.INPUT);
-                result.add(in);
-            }
-        }
-        return result;
-    }
-
-
-    /**
-     * Returns the consumer processes for a given feed if any, null otherwise.
-     *
-     * @param feed input feed
-     * @return the set of processes which use the given feed as input, empty set if no consumers.
-     */
-    public static Set<Process> getConsumerProcesses(Feed feed) throws FalconException {
-        Set<Process> result = new HashSet<>();
-        EntityList dependencies = EntityUtil.getEntityDependencies(feed);
-
-        for (EntityList.EntityElement e : dependencies.getElements()) {
-            if (e.tag.contains(EntityList.INPUT_TAG)) {
-                Process consumer = EntityUtil.getEntity(EntityType.PROCESS, e.name);
-                result.add(consumer);
-            }
-        }
-        return result;
-    }
-
-    // return all instances of a process which will consume the given feed instance
-    private static Set<Date> getConsumerProcessInstanceTimes(Feed feed, Date feedInstancetime, Process consumer,
-              org.apache.falcon.entity.v0.cluster.Cluster cluster) throws FalconException {
-
-        Set<Date> result = new HashSet<>();
-        // find relevant cluster for the process
-        org.apache.falcon.entity.v0.process.Cluster processCluster =
-                ProcessHelper.getCluster(consumer, cluster.getName());
-        if (processCluster == null) {
-            throw new IllegalArgumentException("Cluster is not valid for process");
-        }
-        Date processStartDate = processCluster.getValidity().getStart();
-        Cluster feedCluster = getCluster(feed, cluster.getName());
-        Date feedStartDate = feedCluster.getValidity().getStart();
-
-        // find all corresponding Inputs as a process may refer same feed multiple times
-        List<Input> inputFeeds = new ArrayList<>();
-        if (consumer.getInputs() != null && consumer.getInputs().getInputs() != null) {
-            for (Input input : consumer.getInputs().getInputs()) {
-                if (StringUtils.equals(input.getFeed(), feed.getName())) {
-                    inputFeeds.add(input);
-                }
-            }
-        }
-
-        // for each input corresponding to given feed, find corresponding consumer instances
-        for (Input in : inputFeeds) {
-            /* Algorithm for finding a consumer instance for an input feed instance
-            Step 1. Find one instance which will consume the given feed instance.
-                    a. take process start date and find last input feed instance time. In this step take care of
-                        frequencies being out of sync.
-                    b. using the above find the time difference between the process instance and feed instance.
-                    c. Adding the above time difference to given feed instance for which we want to find the consumer
-                        instances we will get one consumer process instance.
-            Step 2. Keep checking for next instances of process till they consume the given feed Instance.
-            Step 3. Similarly check for all previous instances of process till they consume the given feed instance.
-            */
-
-            // Step 1.a & 1.b
-            ExpressionHelper.setReferenceDate(processStartDate);
-            ExpressionHelper evaluator = ExpressionHelper.get();
-            Date startRelative = evaluator.evaluate(in.getStart(), Date.class);
-            Date startTimeActual = EntityUtil.getPreviousInstanceTime(feedStartDate,
-                    feed.getFrequency(), feed.getTimezone(), startRelative);
-            Long offset = processStartDate.getTime() - startTimeActual.getTime();
-
-            // Step 1.c
-            Date processInstanceStartRelative = new Date(feedInstancetime.getTime() + offset);
-            Date processInstanceStartActual = EntityUtil.getPreviousInstanceTime(processStartDate,
-                    consumer.getFrequency(), consumer.getTimezone(), processInstanceStartRelative);
-
-
-            // Step 2.
-            Date currentInstance = processInstanceStartActual;
-            while (true) {
-                Date nextConsumerInstance = EntityUtil.getNextStartTime(processStartDate,
-                        consumer.getFrequency(), consumer.getTimezone(), currentInstance);
-
-                ExpressionHelper.setReferenceDate(nextConsumerInstance);
-                evaluator = ExpressionHelper.get();
-                Date inputStart = evaluator.evaluate(in.getStart(), Date.class);
-                Long rangeStart = EntityUtil.getPreviousInstanceTime(feedStartDate, feed.getFrequency(),
-                        feed.getTimezone(), inputStart).getTime();
-                Long rangeEnd = evaluator.evaluate(in.getEnd(), Date.class).getTime();
-                if (rangeStart <= feedInstancetime.getTime() && feedInstancetime.getTime() <= rangeEnd) {
-                    if (!nextConsumerInstance.before(processCluster.getValidity().getStart())
-                            && nextConsumerInstance.before(processCluster.getValidity().getEnd())) {
-                        result.add(nextConsumerInstance);
-                    }
-                } else {
-                    break;
-                }
-                currentInstance = new Date(nextConsumerInstance.getTime() + ONE_MS);
-            }
-
-            // Step 3.
-            currentInstance = processInstanceStartActual;
-            while (true) {
-                Date nextConsumerInstance = EntityUtil.getPreviousInstanceTime(processStartDate,
-                        consumer.getFrequency(), consumer.getTimezone(), currentInstance);
-
-                ExpressionHelper.setReferenceDate(nextConsumerInstance);
-                evaluator = ExpressionHelper.get();
-                Date inputStart = evaluator.evaluate(in.getStart(), Date.class);
-                Long rangeStart = EntityUtil.getPreviousInstanceTime(feedStartDate, feed.getFrequency(),
-                        feed.getTimezone(), inputStart).getTime();
-                Long rangeEnd = evaluator.evaluate(in.getEnd(), Date.class).getTime();
-                if (rangeStart <= feedInstancetime.getTime() && feedInstancetime.getTime() <= rangeEnd) {
-                    if (!nextConsumerInstance.before(processCluster.getValidity().getStart())
-                            && nextConsumerInstance.before(processCluster.getValidity().getEnd())) {
-                        result.add(nextConsumerInstance);
-                    }
-                } else {
-                    break;
-                }
-                currentInstance = new Date(nextConsumerInstance.getTime() - ONE_MS);
-            }
-        }
-        return result;
-    }
-
-    public static FeedInstanceResult getFeedInstanceListing(Entity entityObject,
-                                                            Date start, Date end) throws FalconException {
-        Set<String> clusters = EntityUtil.getClustersDefinedInColos(entityObject);
-        FeedInstanceResult result = new FeedInstanceResult(APIResult.Status.SUCCEEDED, "Success");
-        for (String cluster : clusters) {
-            Feed feed = (Feed) entityObject;
-            Storage storage = createStorage(cluster, feed);
-            List<FeedInstanceStatus> feedListing = storage.getListing(feed, cluster, LocationType.DATA, start, end);
-            FeedInstanceResult.Instance[] instances = new FeedInstanceResult.Instance[feedListing.size()];
-            int index = 0;
-            for (FeedInstanceStatus feedStatus : feedListing) {
-                FeedInstanceResult.Instance instance = new
-                        FeedInstanceResult.Instance(cluster, feedStatus.getInstance(),
-                        feedStatus.getStatus().name());
-                instance.creationTime = feedStatus.getCreationTime();
-                instance.uri = feedStatus.getUri();
-                instance.size = feedStatus.getSize();
-                instance.sizeH = feedStatus.getSizeH();
-                instances[index++] = instance;
-            }
-            result.setInstances(instances);
-        }
-        return result;
-    }
-
-
-    /**
-     * Returns the data source type associated with the Feed's import policy.
-     *
-     * @param clusterEntity
-     * @param feed
-     * @return {@link org.apache.falcon.entity.v0.datasource.DatasourceType}
-     * @throws FalconException
-     */
-    public static DatasourceType getImportDatasourceType(
-            org.apache.falcon.entity.v0.cluster.Cluster clusterEntity,
-            Feed feed) throws FalconException {
-        Cluster feedCluster = getCluster(feed, clusterEntity.getName());
-        if (isImportEnabled(feedCluster)) {
-            return DatasourceHelper.getDatasourceType(getImportDatasourceName(feedCluster));
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Return if Import policy is enabled in the Feed definition.
-     *
-     * @param feedCluster
-     * @return true if import policy is enabled else false
-     */
-
-    public static boolean isImportEnabled(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (feedCluster.getType() == ClusterType.SOURCE) {
-            return (feedCluster.getImport() != null);
-        }
-        return false;
-    }
-
-
-
-    /**
-     * Returns the data source name associated with the Feed's import policy.
-     *
-     * @param feedCluster
-     * @return DataSource name defined in the Datasource Entity
-     */
-    public static String getImportDatasourceName(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isImportEnabled(feedCluster)) {
-            return feedCluster.getImport().getSource().getName();
-        } else {
-            return null;
-        }
-    }
-
-
-
-    /**
-     * Returns Datasource table name.
-     *
-     * @param feedCluster
-     * @return Table or Topic name of the Datasource
-     */
-
-    public static String getImportDataSourceTableName(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isImportEnabled(feedCluster)) {
-            return feedCluster.getImport().getSource().getTableName();
-        } else {
-            return null;
-        }
-    }
-
-
-
-    /**
-     * Returns the extract method type.
-     *
-     * @param feedCluster
-     * @return {@link org.apache.falcon.entity.v0.feed.ExtractMethod}
-     */
-
-    public static ExtractMethod getImportExtractMethod(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isImportEnabled(feedCluster)) {
-            return feedCluster.getImport().getSource().getExtract().getType();
-        } else {
-            return null;
-        }
-    }
-
-
-
-    /**
-     * Returns the merge type of the Feed import policy.
-     *
-     * @param feedCluster
-     * @return {@link org.apache.falcon.entity.v0.feed.MergeType}
-     */
-    public static MergeType getImportMergeType(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isImportEnabled(feedCluster)) {
-            return feedCluster.getImport().getSource().getExtract().getMergepolicy();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Returns the initial instance date for the import data set for the coordinator.
-     *
-     * @param feedCluster
-     * @return Feed cluster validity start date or recent time
-     */
-    public static Date getImportInitalInstance(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        return feedCluster.getValidity().getStart();
-    }
-
-
-    /**
-     * Helper method to check if the merge type is snapshot.
-     *
-     * @param feedCluster
-     * @return true if the feed import policy merge type is snapshot
-     *
-     */
-    public static boolean isSnapshotMergeType(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        return MergeType.SNAPSHOT == getImportMergeType(feedCluster);
-    }
-
-    /**
-     * Returns extra arguments specified in the Feed import policy.
-     *
-     * @param feedCluster
-     * @return
-     * @throws FalconException
-     */
-    public static Map<String, String> getImportArguments(org.apache.falcon.entity.v0.feed.Cluster feedCluster)
-        throws FalconException {
-
-        Map<String, String> argsMap = new HashMap<String, String>();
-        if (feedCluster.getImport().getArguments() == null) {
-            return argsMap;
-        }
-
-        for(org.apache.falcon.entity.v0.feed.Argument p : feedCluster.getImport().getArguments().getArguments()) {
-            argsMap.put(p.getName().toLowerCase(), p.getValue());
-        }
-        return argsMap;
-    }
-
-
-
-
-    /**
-     * Returns Fields list specified in the Import Policy.
-     *
-     * @param feedCluster
-     * @return List of String
-     * @throws FalconException
-     */
-    public static List<String> getImportFieldList(org.apache.falcon.entity.v0.feed.Cluster feedCluster)
-        throws FalconException {
-        if (feedCluster.getImport().getSource().getFields() == null) {
-            return null;
-        }
-        org.apache.falcon.entity.v0.feed.FieldsType fieldType = feedCluster.getImport().getSource().getFields();
-        FieldIncludeExclude includeFields = fieldType.getIncludes();
-        if (includeFields == null) {
-            return null;
-        }
-        return includeFields.getFields();
-    }
-
-
-    /**
-     * Returns true if exclude field lists are used. This is a TBD feature.
-     *
-     * @param ds Feed Datasource
-     * @return true if an exclude field list is used, false otherwise.
-     * @throws FalconException
-     */
-
-    public static boolean isFieldExcludes(org.apache.falcon.entity.v0.feed.Datasource ds)
-        throws FalconException {
-        if (ds.getFields() != null) {
-            org.apache.falcon.entity.v0.feed.FieldsType fieldType = ds.getFields();
-            FieldIncludeExclude excludeFields = fieldType.getExcludes();
-            if ((excludeFields != null) && (excludeFields.getFields().size() > 0)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    public static FeedInstanceStatus.AvailabilityStatus getFeedInstanceStatus(Feed feed, String clusterName,
-                                                                              Date instanceTime)
-        throws FalconException {
-        Storage storage = createStorage(clusterName, feed);
-        return storage.getInstanceAvailabilityStatus(feed, clusterName, LocationType.DATA, instanceTime);
-    }
-
-    public static boolean isLifecycleEnabled(Feed feed, String clusterName) {
-        Cluster cluster = getCluster(feed, clusterName);
-        return cluster != null && (feed.getLifecycle() != null || cluster.getLifecycle() != null);
-    }
-
-    public static Frequency getLifecycleRetentionFrequency(Feed feed, String clusterName) throws FalconException {
-        Frequency retentionFrequency = null;
-        RetentionStage retentionStage = getRetentionStage(feed, clusterName);
-        if (retentionStage != null) {
-            if (retentionStage.getFrequency() != null) {
-                retentionFrequency = retentionStage.getFrequency();
-            } else {
-                Frequency feedFrequency = feed.getFrequency();
-                Frequency defaultFrequency = new Frequency("hours(6)");
-                if (DateUtil.getFrequencyInMillis(feedFrequency) < DateUtil.getFrequencyInMillis(defaultFrequency)) {
-                    retentionFrequency = defaultFrequency;
-                } else {
-                    retentionFrequency = new Frequency(feedFrequency.toString());
-                }
-            }
-        }
-        return retentionFrequency;
-    }
-
-    /**
-     * Returns the hadoop cluster queue name specified for the replication jobs to run in the Lifecycle
-     * section of the target cluster section of the feed entity.
-     *
-     * NOTE: Lifecycle for replication is not implemented yet; this currently returns null.
-     *
-     * @param feed
-     * @param clusterName
-     * @return hadoop cluster queue name specified in the feed entity
-     * @throws FalconException
-     */
-
-    public static String getLifecycleReplicationQueue(Feed feed, String clusterName) throws FalconException {
-        return null;
-    }
-
-    /**
-     * Returns the hadoop cluster queue name specified for the retention jobs to run in the Lifecycle
-     * section of feed entity.
-     *
-     * @param feed
-     * @param clusterName
-     * @return hadoop cluster queue name specified in the feed entity
-     * @throws FalconException
-     */
-    public static String getLifecycleRetentionQueue(Feed feed, String clusterName) throws FalconException {
-        RetentionStage retentionStage = getRetentionStage(feed, clusterName);
-        if (retentionStage != null) {
-            return retentionStage.getQueue();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Returns the data source type associated with the Feed's export policy.
-     *
-     * @param clusterEntity
-     * @param feed
-     * @return {@link org.apache.falcon.entity.v0.datasource.DatasourceType}
-     * @throws FalconException
-     */
-    public static DatasourceType getExportDatasourceType(
-            org.apache.falcon.entity.v0.cluster.Cluster clusterEntity,
-            Feed feed) throws FalconException {
-        Cluster feedCluster = getCluster(feed, clusterEntity.getName());
-        if (isExportEnabled(feedCluster)) {
-            return DatasourceHelper.getDatasourceType(getExportDatasourceName(feedCluster));
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Return if Export policy is enabled in the Feed definition.
-     *
-     * @param feedCluster
-     * @return true if export policy is enabled else false
-     */
-
-    public static boolean isExportEnabled(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        return (feedCluster.getExport() != null);
-    }
-
-    /**
-     * Returns the data source name associated with the Feed's export policy.
-     *
-     * @param feedCluster
-     * @return DataSource name defined in the Datasource Entity
-     */
-    public static String getExportDatasourceName(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isExportEnabled(feedCluster)) {
-            return feedCluster.getExport().getTarget().getName();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Returns Datasource table name.
-     *
-     * @param feedCluster
-     * @return Table or Topic name of the Datasource
-     */
-
-    public static String getExportDataSourceTableName(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isExportEnabled(feedCluster)) {
-            return feedCluster.getExport().getTarget().getTableName();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Returns the export load type.
-     *
-     * @param feedCluster
-     * @return {@link org.apache.falcon.entity.v0.feed.Load}
-     */
-
-    public static Load getExportLoadMethod(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        if (isExportEnabled(feedCluster)) {
-            return feedCluster.getExport().getTarget().getLoad();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Returns the initial instance date of the export data set for the coordinator.
-     *
-     * @param feedCluster
-     * @return Feed cluster validity start date or recent time
-     */
-    public static Date getExportInitalInstance(org.apache.falcon.entity.v0.feed.Cluster feedCluster) {
-        return feedCluster.getValidity().getStart();
-    }
-
-    /**
-     * Returns extra arguments specified in the Feed export policy.
-     *
-     * @param feedCluster
-     * @return map of export argument names (lower-cased) to their values
-     * @throws FalconException
-     */
-    public static Map<String, String> getExportArguments(org.apache.falcon.entity.v0.feed.Cluster feedCluster)
-        throws FalconException {
-
-        Map<String, String> argsMap = new HashMap<String, String>();
-        if (feedCluster.getExport().getArguments() == null) {
-            return argsMap;
-        }
-
-        for(org.apache.falcon.entity.v0.feed.Argument p : feedCluster.getExport().getArguments().getArguments()) {
-            argsMap.put(p.getName().toLowerCase(), p.getValue());
-        }
-        return argsMap;
-    }
-
-    public static Validity getClusterValidity(Feed feed, String clusterName) throws FalconException {
-        Cluster cluster = getCluster(feed, clusterName);
-        if (cluster == null) {
-            throw new FalconException("Invalid cluster: " + clusterName + " for feed: " + feed.getName());
-        }
-        return cluster.getValidity();
-    }
-
-    public static Frequency getOldRetentionFrequency(Feed feed) {
-        Frequency feedFrequency = feed.getFrequency();
-        Frequency defaultFrequency = new Frequency("hours(24)");
-        if (DateUtil.getFrequencyInMillis(feedFrequency) < DateUtil.getFrequencyInMillis(defaultFrequency)) {
-            return new Frequency("hours(6)");
-        } else {
-            return defaultFrequency;
-        }
-    }
-
-    public static Frequency getRetentionFrequency(Feed feed, Cluster feedCluster) throws FalconException {
-        Frequency retentionFrequency;
-        retentionFrequency = getLifecycleRetentionFrequency(feed, feedCluster.getName());
-        if (retentionFrequency == null) {
-            retentionFrequency = getOldRetentionFrequency(feed);
-        }
-        return retentionFrequency;
-    }
-
-    public static int getRetentionLimitInSeconds(Feed feed, String clusterName) throws FalconException {
-        Frequency retentionLimit = new Frequency("minutes(0)");
-        RetentionStage retentionStage = getRetentionStage(feed, clusterName);
-        if (retentionStage != null) {
-            for (Property property : retentionStage.getProperties().getProperties()) {
-                if (property.getName().equalsIgnoreCase("retention.policy.agebaseddelete.limit")) {
-                    retentionLimit = new Frequency(property.getValue());
-                    break;
-                }
-            }
-        } else {
-            retentionLimit = getCluster(feed, clusterName).getRetention().getLimit();
-        }
-        Long freqInMillis = DateUtil.getFrequencyInMillis(retentionLimit);
-        return (int) (freqInMillis/1000);
-    }
-
-    /**
-     * Returns the replication job's queue name specified in the feed entity definition.
-     * First looks into the Lifecycle stage if it exists. If that is null, falls back to the queueName property specified
-     * in the Feed definition.
-     *
-     * @param feed
-     * @param feedCluster
-     * @return
-     * @throws FalconException
-     */
-    public static String getReplicationQueue(Feed feed, Cluster feedCluster) throws FalconException {
-        String queueName;
-        queueName = getLifecycleReplicationQueue(feed, feedCluster.getName());
-        if (StringUtils.isBlank(queueName)) {
-            queueName = getQueueFromProperties(feed);
-        }
-        return queueName;
-    }
-
-    /**
-     * Returns the retention job's queue name specified in the feed entity definition.
-     * First looks into the Lifecycle stage. If null, looks into the queueName property specified
-     * in the Feed definition.
-     *
-     * @param feed
-     * @param feedCluster
-     * @return
-     * @throws FalconException
-     */
-    public static String getRetentionQueue(Feed feed, Cluster feedCluster) throws FalconException {
-        String queueName = getLifecycleRetentionQueue(feed, feedCluster.getName());
-        if (StringUtils.isBlank(queueName)) {
-            queueName = getQueueFromProperties(feed);
-        }
-        return queueName;
-    }
-
-    /**
-     * Returns the queue name specified in the Feed entity definition from queueName property.
-     *
-     * @param feed
-     * @return queueName property value
-     */
-    public static String getQueueFromProperties(Feed feed) {
-        return getPropertyValue(feed, EntityUtil.MR_QUEUE_NAME);
-    }
-
-    /**
-     * Returns value of a feed property given property name.
-     * @param feed
-     * @param propName
-     * @return property value
-     */
-
-    public static String getPropertyValue(Feed feed, String propName) {
-        if (feed.getProperties() != null) {
-            for (Property prop : feed.getProperties().getProperties()) {
-                if ((prop != null) && (prop.getName().equals(propName))) {
-                    return prop.getValue();
-                }
-            }
-        }
-        return null;
-    }
-}
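
For reference, a minimal sketch of how the retention helpers deleted above were typically consumed. The enclosing class is assumed to be org.apache.falcon.entity.FeedHelper (the file name is not visible in this hunk), and the Feed and Cluster arguments are placeholders supplied by the caller:

    import org.apache.falcon.FalconException;
    import org.apache.falcon.entity.FeedHelper;
    import org.apache.falcon.entity.v0.feed.Cluster;
    import org.apache.falcon.entity.v0.feed.Feed;

    public final class RetentionSettingsSketch {
        // Resolve the retention queue and retention window for one feed cluster,
        // falling back from the lifecycle stage to feed-level properties.
        static void printRetentionSettings(Feed feed, Cluster feedCluster) throws FalconException {
            String queue = FeedHelper.getRetentionQueue(feed, feedCluster);
            int limitSeconds = FeedHelper.getRetentionLimitInSeconds(feed, feedCluster.getName());
            System.out.println("queue=" + queue + ", retention=" + limitSeconds + "s");
        }
    }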

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/FeedInstanceStatus.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/FeedInstanceStatus.java b/common/src/main/java/org/apache/falcon/entity/FeedInstanceStatus.java
deleted file mode 100644
index 8b43671..0000000
--- a/common/src/main/java/org/apache/falcon/entity/FeedInstanceStatus.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.entity;
-
-/**
- * Feed Instance Status provides a feed instance listing along with each instance's availability status.
- *
- * It is used for exchanging information in the getListing API.
- */
-public class FeedInstanceStatus {
-
-    private String instance;
-
-    private final String uri;
-
-    private long creationTime;
-
-    private long size = -1;
-
-    private String sizeH;
-
-    private AvailabilityStatus status = AvailabilityStatus.MISSING;
-
-    /**
-     * Availability status of a feed instance.
-     *
-     * MISSING if the feed instance is entirely missing,
-     * AVAILABLE if the instance is present and, when an availability flag is configured, the flag is present too,
-     * PARTIAL if the availability flag is configured in the feed definition but is missing in the data path,
-     * EMPTY if the instance is present but contains no data.
-     */
-    public enum AvailabilityStatus {MISSING, AVAILABLE, PARTIAL, EMPTY}
-
-    public FeedInstanceStatus(String uri) {
-        this.uri = uri;
-    }
-
-    public String getInstance() {
-        return instance;
-    }
-
-    public void setInstance(String instance) {
-        this.instance = instance;
-    }
-
-    public String getUri() {
-        return uri;
-    }
-
-    public long getCreationTime() {
-        return creationTime;
-    }
-
-    public void setCreationTime(long creationTime) {
-        this.creationTime = creationTime;
-    }
-
-    public long getSize() {
-        return size;
-    }
-
-    public String getSizeH(){
-        return sizeH;
-    }
-
-    public void setSize(long size) {
-        this.size = size;
-    }
-
-    public void setSizeH(String sizeH) {
-        this.sizeH = sizeH;
-    }
-
-
-    public AvailabilityStatus getStatus() {
-        return status;
-    }
-
-    public void setStatus(AvailabilityStatus status) {
-        this.status = status;
-    }
-
-    @Override
-    public String toString() {
-        return "FeedInstanceStatus{"
-                + "instance='" + instance + '\''
-                + ", uri='" + uri + '\''
-                + ", creationTime=" + creationTime
-                + ", size=" + size
-                + ", status='" + status + '\''
-                + '}';
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
-
-        FeedInstanceStatus that = (FeedInstanceStatus) o;
-
-        if (creationTime != that.creationTime) {
-            return false;
-        }
-        if (size != that.size) {
-            return false;
-        }
-        if (!instance.equals(that.instance)) {
-            return false;
-        }
-        if (status != that.status) {
-            return false;
-        }
-        if (uri != null ? !uri.equals(that.uri) : that.uri != null) {
-            return false;
-        }
-
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = instance.hashCode();
-        result = 31 * result + (uri != null ? uri.hashCode() : 0);
-        result = 31 * result + (int) (creationTime ^ (creationTime >>> 32));
-        result = 31 * result + (int) (size ^ (size >>> 32));
-        result = 31 * result + (status != null ? status.hashCode() : 0);
-        return result;
-    }
-}
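
A small usage sketch for the FeedInstanceStatus bean removed above; the path, date and size values are illustrative only:

    import org.apache.falcon.entity.FeedInstanceStatus;

    public final class FeedInstanceStatusSketch {
        // Build a status record for a single feed instance path.
        static FeedInstanceStatus describeInstance() {
            FeedInstanceStatus status = new FeedInstanceStatus("/projects/falcon/clicks/2016-03-01");
            status.setInstance("2016-03-01T00:00Z");
            status.setCreationTime(System.currentTimeMillis());
            status.setSize(1024L);
            status.setSizeH("1.0 KB");
            status.setStatus(FeedInstanceStatus.AvailabilityStatus.AVAILABLE);
            return status;
        }
    }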

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/FileSystemStorage.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/FileSystemStorage.java b/common/src/main/java/org/apache/falcon/entity/FileSystemStorage.java
deleted file mode 100644
index ece8b5d..0000000
--- a/common/src/main/java/org/apache/falcon/entity/FileSystemStorage.java
+++ /dev/null
@@ -1,509 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.Pair;
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.retention.EvictedInstanceSerDe;
-import org.apache.falcon.retention.EvictionHelper;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.jsp.el.ELException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * A file system implementation of a feed storage.
- */
-public class FileSystemStorage extends Configured implements Storage {
-
-    private static final Logger LOG = LoggerFactory.getLogger(FileSystemStorage.class);
-    private final StringBuffer instancePaths = new StringBuffer();
-    private final StringBuilder instanceDates = new StringBuilder();
-
-    public static final String FEED_PATH_SEP = "#";
-    public static final String LOCATION_TYPE_SEP = "=";
-
-    public static final String FILE_SYSTEM_URL = "${nameNode}";
-
-    private final String storageUrl;
-    private final List<Location> locations;
-
-    public FileSystemStorage(Feed feed) {
-        this(FILE_SYSTEM_URL, feed.getLocations());
-    }
-
-    protected FileSystemStorage(String storageUrl, Locations locations) {
-        this(storageUrl, locations.getLocations());
-    }
-
-    protected FileSystemStorage(String storageUrl, List<Location> locations) {
-        if (storageUrl == null || storageUrl.length() == 0) {
-            throw new IllegalArgumentException("FileSystem URL cannot be null or empty");
-        }
-
-        if (locations == null || locations.size() == 0) {
-            throw new IllegalArgumentException("FileSystem Locations cannot be null or empty");
-        }
-
-        this.storageUrl = storageUrl;
-        this.locations = locations;
-    }
-
-    /**
-     * Create an instance from the URI Template that was generated using
-     * the getUriTemplate() method.
-     *
-     * @param uriTemplate the uri template from org.apache.falcon.entity.FileSystemStorage#getUriTemplate
-     * @throws URISyntaxException
-     */
-    protected FileSystemStorage(String uriTemplate) throws URISyntaxException {
-        if (uriTemplate == null || uriTemplate.length() == 0) {
-            throw new IllegalArgumentException("URI template cannot be null or empty");
-        }
-
-        String rawStorageUrl = null;
-        List<Location> rawLocations = new ArrayList<Location>();
-        String[] feedLocs = uriTemplate.split(FEED_PATH_SEP);
-        for (String rawPath : feedLocs) {
-            String[] typeAndPath = rawPath.split(LOCATION_TYPE_SEP);
-            final String processed = typeAndPath[1].replaceAll(DOLLAR_EXPR_START_REGEX, DOLLAR_EXPR_START_NORMALIZED)
-                                                   .replaceAll("}", EXPR_CLOSE_NORMALIZED);
-            URI uri = new URI(processed);
-            if (rawStorageUrl == null) {
-                rawStorageUrl = uri.getScheme() + "://" + uri.getAuthority();
-            }
-
-            String path = uri.getPath();
-            final String finalPath = path.replaceAll(DOLLAR_EXPR_START_NORMALIZED, DOLLAR_EXPR_START_REGEX)
-                                         .replaceAll(EXPR_CLOSE_NORMALIZED, EXPR_CLOSE_REGEX);
-
-            Location location = new Location();
-            location.setPath(finalPath);
-            location.setType(LocationType.valueOf(typeAndPath[0]));
-            rawLocations.add(location);
-        }
-
-        this.storageUrl = rawStorageUrl;
-        this.locations = rawLocations;
-    }
-
-    @Override
-    public TYPE getType() {
-        return TYPE.FILESYSTEM;
-    }
-
-    public String getStorageUrl() {
-        return storageUrl;
-    }
-
-    public List<Location> getLocations() {
-        return locations;
-    }
-
-    @Override
-    public String getUriTemplate() {
-        String feedPathMask = getUriTemplate(LocationType.DATA);
-        String metaPathMask = getUriTemplate(LocationType.META);
-        String statsPathMask = getUriTemplate(LocationType.STATS);
-        String tmpPathMask = getUriTemplate(LocationType.TMP);
-
-        StringBuilder feedBasePaths = new StringBuilder();
-        feedBasePaths.append(LocationType.DATA.name())
-                     .append(LOCATION_TYPE_SEP)
-                     .append(feedPathMask);
-
-        if (metaPathMask != null) {
-            feedBasePaths.append(FEED_PATH_SEP)
-                         .append(LocationType.META.name())
-                         .append(LOCATION_TYPE_SEP)
-                         .append(metaPathMask);
-        }
-
-        if (statsPathMask != null) {
-            feedBasePaths.append(FEED_PATH_SEP)
-                         .append(LocationType.STATS.name())
-                         .append(LOCATION_TYPE_SEP)
-                         .append(statsPathMask);
-        }
-
-        if (tmpPathMask != null) {
-            feedBasePaths.append(FEED_PATH_SEP)
-                         .append(LocationType.TMP.name())
-                         .append(LOCATION_TYPE_SEP)
-                         .append(tmpPathMask);
-        }
-
-        return feedBasePaths.toString();
-    }
-
-    @Override
-    public String getUriTemplate(LocationType locationType) {
-        return getUriTemplate(locationType, locations);
-    }
-
-    public String getUriTemplate(LocationType locationType, List<Location> locationList) {
-        Location locationForType = null;
-        for (Location location : locationList) {
-            if (location.getType() == locationType) {
-                locationForType = location;
-                break;
-            }
-        }
-
-        if (locationForType == null || StringUtils.isEmpty(locationForType.getPath())) {
-            return null;
-        }
-
-        // normalize the path so trailing and double '/' are removed
-        Path locationPath = new Path(locationForType.getPath());
-        locationPath = locationPath.makeQualified(getDefaultUri(), getWorkingDir());
-
-        if (isRelativePath(locationPath)) {
-            locationPath = new Path(storageUrl + locationPath);
-        }
-
-        return locationPath.toString();
-    }
-
-    private boolean isRelativePath(Path locationPath) {
-        return locationPath.toUri().getAuthority() == null && isStorageUrlATemplate();
-    }
-
-    private boolean isStorageUrlATemplate() {
-        return storageUrl.startsWith(FILE_SYSTEM_URL);
-    }
-
-    private URI getDefaultUri() {
-        return new Path(isStorageUrlATemplate() ? "/" : storageUrl).toUri();
-    }
-
-    public Path getWorkingDir() {
-        return new Path(CurrentUser.isAuthenticated() ? "/user/" + CurrentUser.getUser() : "/");
-    }
-
-    @Override
-    public boolean isIdentical(Storage toCompareAgainst) throws FalconException {
-        if (!(toCompareAgainst instanceof FileSystemStorage)) {
-            return false;
-        }
-
-        FileSystemStorage fsStorage = (FileSystemStorage) toCompareAgainst;
-        final List<Location> fsStorageLocations = fsStorage.getLocations();
-
-        return getLocations().size() == fsStorageLocations.size()
-                && StringUtils.equals(getUriTemplate(LocationType.DATA, getLocations()),
-                    getUriTemplate(LocationType.DATA, fsStorageLocations))
-                && StringUtils.equals(getUriTemplate(LocationType.STATS, getLocations()),
-                    getUriTemplate(LocationType.STATS, fsStorageLocations))
-                && StringUtils.equals(getUriTemplate(LocationType.META, getLocations()),
-                    getUriTemplate(LocationType.META, fsStorageLocations))
-                && StringUtils.equals(getUriTemplate(LocationType.TMP, getLocations()),
-                    getUriTemplate(LocationType.TMP, fsStorageLocations));
-    }
-
-    public static Location getLocation(List<Location> locations, LocationType type) {
-        for (Location loc : locations) {
-            if (loc.getType() == type) {
-                return loc;
-            }
-        }
-
-        return null;
-    }
-
-    @Override
-    public void validateACL(AccessControlList acl) throws FalconException {
-        try {
-            for (Location location : getLocations()) {
-                String pathString = getRelativePath(location);
-                Path path = new Path(pathString);
-                FileSystem fileSystem =
-                    HadoopClientFactory.get().createProxiedFileSystem(path.toUri(), getConf());
-                if (fileSystem.exists(path)) {
-                    FileStatus fileStatus = fileSystem.getFileStatus(path);
-                    Set<String> groups = CurrentUser.getGroupNames();
-
-                    if (fileStatus.getOwner().equals(acl.getOwner())
-                            || groups.contains(acl.getGroup())) {
-                        return;
-                    }
-
-                    LOG.error("Permission denied: Either Feed ACL owner {} or group {} doesn't "
-                                    + "match the actual file owner {} or group {} for file {}",
-                            acl.getOwner(), acl.getGroup(), fileStatus.getOwner(), fileStatus.getGroup(), path);
-                    throw new FalconException("Permission denied: Either Feed ACL owner "
-                            + acl + " or group " + acl.getGroup() + " doesn't match the actual "
-                            + "file owner " + fileStatus.getOwner() + " or group "
-                            + fileStatus.getGroup() + "  for file " + path);
-                }
-            }
-        } catch (IOException e) {
-            LOG.error("Can't validate ACL on storage {}", getStorageUrl(), e);
-            throw new RuntimeException("Can't validate storage ACL (URI " + getStorageUrl() + ")", e);
-        }
-    }
-
-    @Override
-    public StringBuilder evict(String retentionLimit, String timeZone, Path logFilePath) throws FalconException {
-        TimeZone tz = TimeZone.getTimeZone(timeZone);
-        try{
-            for (Location location : getLocations()) {
-                fileSystemEvictor(getUriTemplate(location.getType()), retentionLimit, tz, logFilePath);
-            }
-            EvictedInstanceSerDe.serializeEvictedInstancePaths(
-                    HadoopClientFactory.get().createProxiedFileSystem(logFilePath.toUri(), getConf()),
-                    logFilePath, instancePaths);
-        }catch (IOException e){
-            throw new FalconException("Couldn't evict feed from fileSystem", e);
-        }catch (ELException e){
-            throw new FalconException("Couldn't evict feed from fileSystem", e);
-        }
-
-        return instanceDates;
-    }
-
-    private void fileSystemEvictor(String feedPath, String retentionLimit, TimeZone timeZone,
-                                   Path logFilePath) throws IOException, ELException, FalconException {
-        Path normalizedPath = new Path(feedPath);
-        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(normalizedPath.toUri());
-        feedPath = normalizedPath.toUri().getPath();
-        LOG.info("Normalized path: {}", feedPath);
-
-        Pair<Date, Date> range = EvictionHelper.getDateRange(retentionLimit);
-
-        List<Path> toBeDeleted = discoverInstanceToDelete(feedPath, timeZone, range.first, fs);
-        if (toBeDeleted.isEmpty()) {
-            LOG.info("No instances to delete.");
-            return;
-        }
-
-        DateFormat dateFormat = new SimpleDateFormat(FeedHelper.FORMAT);
-        dateFormat.setTimeZone(timeZone);
-        Path feedBasePath = fs.makeQualified(FeedHelper.getFeedBasePath(feedPath));
-        for (Path path : toBeDeleted) {
-            deleteInstance(fs, path, feedBasePath);
-            Date date = FeedHelper.getDate(feedPath, new Path(path.toUri().getPath()), timeZone);
-            instanceDates.append(dateFormat.format(date)).append(',');
-            instancePaths.append(path).append(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR);
-        }
-    }
-
-    private List<Path> discoverInstanceToDelete(String inPath, TimeZone timeZone, Date start, FileSystem fs)
-        throws IOException {
-        FileStatus[] files = findFilesForFeed(fs, inPath);
-        if (files == null || files.length == 0) {
-            return Collections.emptyList();
-        }
-
-        List<Path> toBeDeleted = new ArrayList<Path>();
-        for (FileStatus file : files) {
-            Date date = FeedHelper.getDate(inPath, new Path(file.getPath().toUri().getPath()), timeZone);
-            LOG.debug("Considering {}", file.getPath().toUri().getPath());
-            LOG.debug("Date: {}", date);
-            if (date != null && !isDateInRange(date, start)) {
-                toBeDeleted.add(file.getPath());
-            }
-        }
-        return toBeDeleted;
-    }
-
-    private FileStatus[] findFilesForFeed(FileSystem fs, String feedBasePath) throws IOException {
-        Matcher matcher = FeedDataPath.PATTERN.matcher(feedBasePath);
-        while (matcher.find()) {
-            String var = feedBasePath.substring(matcher.start(), matcher.end());
-            feedBasePath = feedBasePath.replaceAll(Pattern.quote(var), "*");
-            matcher = FeedDataPath.PATTERN.matcher(feedBasePath);
-        }
-        LOG.info("Searching for {}", feedBasePath);
-        return fs.globStatus(new Path(feedBasePath));
-    }
-
-    private boolean isDateInRange(Date date, Date start) {
-        //ignore end ( && date.compareTo(end) <= 0 )
-        return date.compareTo(start) >= 0;
-    }
-
-    private void deleteInstance(FileSystem fs, Path path, Path feedBasePath) throws IOException {
-        if (fs.delete(path, true)) {
-            LOG.info("Deleted instance: {}", path);
-        }else{
-            throw new IOException("Unable to delete instance: " + path);
-        }
-        deleteParentIfEmpty(fs, path.getParent(), feedBasePath);
-    }
-
-    private void deleteParentIfEmpty(FileSystem fs, Path parent, Path feedBasePath) throws IOException {
-        if (feedBasePath.equals(parent)) {
-            LOG.info("Not deleting feed base path: {}", parent);
-        } else {
-            FileStatus[] files = fs.listStatus(parent);
-            if (files != null && files.length == 0) {
-                LOG.info("Parent path: {} is empty, deleting path", parent);
-                if (fs.delete(parent, true)) {
-                    LOG.info("Deleted empty dir: {}", parent);
-                } else {
-                    throw new IOException("Unable to delete parent path:" + parent);
-                }
-                deleteParentIfEmpty(fs, parent.getParent(), feedBasePath);
-            }
-        }
-    }
-
-    @Override
-    @SuppressWarnings("MagicConstant")
-    public List<FeedInstanceStatus> getListing(Feed feed, String clusterName, LocationType locationType,
-                                               Date start, Date end) throws FalconException {
-
-        Calendar calendar = Calendar.getInstance();
-        List<Location> clusterSpecificLocation = FeedHelper.
-                getLocations(FeedHelper.getCluster(feed, clusterName), feed);
-        Location location = getLocation(clusterSpecificLocation, locationType);
-        try {
-            FileSystem fileSystem = HadoopClientFactory.get().createProxiedFileSystem(getConf());
-            Cluster cluster = ClusterHelper.getCluster(clusterName);
-            Properties baseProperties = FeedHelper.getClusterProperties(cluster);
-            baseProperties.putAll(FeedHelper.getFeedProperties(feed));
-            List<FeedInstanceStatus> instances = new ArrayList<FeedInstanceStatus>();
-            Date feedStart = FeedHelper.getCluster(feed, clusterName).getValidity().getStart();
-            TimeZone tz = feed.getTimezone();
-            Date alignedStart = EntityUtil.getNextStartTime(feedStart, feed.getFrequency(), tz, start);
-
-            String basePath = location.getPath();
-            while (!end.before(alignedStart)) {
-                Properties allProperties = ExpressionHelper.getTimeVariables(alignedStart, tz);
-                allProperties.putAll(baseProperties);
-                String feedInstancePath = ExpressionHelper.substitute(basePath, allProperties);
-                FileStatus fileStatus = getFileStatus(fileSystem, new Path(feedInstancePath));
-                FeedInstanceStatus instance = new FeedInstanceStatus(feedInstancePath);
-
-                Date date = FeedHelper.getDate(basePath, new Path(feedInstancePath), tz);
-                instance.setInstance(SchemaHelper.formatDateUTC(date));
-                if (fileStatus != null) {
-                    instance.setCreationTime(fileStatus.getModificationTime());
-                    ContentSummary contentSummary = fileSystem.getContentSummary(fileStatus.getPath());
-                    if (contentSummary != null) {
-                        long size = contentSummary.getSpaceConsumed();
-                        instance.setSize(size);
-                        if (!StringUtils.isEmpty(feed.getAvailabilityFlag())) {
-                            FileStatus doneFile = getFileStatus(fileSystem,
-                                    new Path(fileStatus.getPath(), feed.getAvailabilityFlag()));
-                            if (doneFile != null) {
-                                instance.setStatus(FeedInstanceStatus.AvailabilityStatus.AVAILABLE);
-                            } else {
-                                instance.setStatus(FeedInstanceStatus.AvailabilityStatus.PARTIAL);
-                            }
-                        } else {
-                            instance.setStatus(size > 0 ? FeedInstanceStatus.AvailabilityStatus.AVAILABLE
-                                    : FeedInstanceStatus.AvailabilityStatus.EMPTY);
-                        }
-                    }
-                }
-                instances.add(instance);
-                calendar.setTime(alignedStart);
-                calendar.add(feed.getFrequency().getTimeUnit().getCalendarUnit(),
-                        feed.getFrequency().getFrequencyAsInt());
-                alignedStart = calendar.getTime();
-            }
-            return instances;
-        } catch (IOException e) {
-            LOG.error("Unable to retrieve listing for {}:{}", locationType, getStorageUrl(), e);
-            throw new FalconException("Unable to retrieve listing for (URI " + getStorageUrl() + ")", e);
-        }
-    }
-
-    @Override
-    public FeedInstanceStatus.AvailabilityStatus getInstanceAvailabilityStatus(Feed feed, String clusterName,
-                                                                   LocationType locationType,
-                                                                   Date instanceTime) throws FalconException {
-
-        List<FeedInstanceStatus> result = getListing(feed, clusterName, locationType, instanceTime, instanceTime);
-        if (result.isEmpty()) {
-            return FeedInstanceStatus.AvailabilityStatus.MISSING;
-        } else {
-            return result.get(0).getStatus();
-        }
-    }
-
-    public FileStatus getFileStatus(FileSystem fileSystem, Path feedInstancePath) {
-        FileStatus fileStatus = null;
-        try {
-            fileStatus = fileSystem.getFileStatus(feedInstancePath);
-        } catch (IOException ignore) {
-            //ignore
-        }
-        return fileStatus;
-    }
-
-    public Configuration getConf() {
-        Configuration conf = new Configuration();
-        conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, storageUrl);
-        return conf;
-    }
-
-    private String getRelativePath(Location location) {
-        // if the path contains variables, locate on the "parent" path (just before first variable usage)
-        Matcher matcher = FeedDataPath.PATTERN.matcher(location.getPath());
-        boolean timedPath = matcher.find();
-        if (timedPath) {
-            return location.getPath().substring(0, matcher.start());
-        }
-        return location.getPath();
-    }
-
-    @Override
-    public String toString() {
-        return "FileSystemStorage{"
-                + "storageUrl='" + storageUrl + '\''
-                + ", locations=" + locations
-                + '}';
-    }
-}
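
A hedged sketch of how the FileSystemStorage listing API removed above could be driven; it assumes a filesystem-based feed whose locations are defined, with the cluster name and date range coming from the caller:

    import java.util.Date;
    import java.util.List;
    import org.apache.falcon.FalconException;
    import org.apache.falcon.entity.FeedInstanceStatus;
    import org.apache.falcon.entity.FileSystemStorage;
    import org.apache.falcon.entity.v0.feed.Feed;
    import org.apache.falcon.entity.v0.feed.LocationType;

    public final class StorageListingSketch {
        // List the DATA-location instances of a feed on one cluster between two dates.
        static void listInstances(Feed feed, String clusterName, Date start, Date end)
            throws FalconException {
            FileSystemStorage storage = new FileSystemStorage(feed);
            List<FeedInstanceStatus> instances =
                storage.getListing(feed, clusterName, LocationType.DATA, start, end);
            for (FeedInstanceStatus instance : instances) {
                System.out.println(instance);
            }
        }
    }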

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/HiveUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/HiveUtil.java b/common/src/main/java/org/apache/falcon/entity/HiveUtil.java
deleted file mode 100644
index f4029e4..0000000
--- a/common/src/main/java/org/apache/falcon/entity/HiveUtil.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.security.SecurityUtil;
-
-import java.util.Properties;
-
-/**
- * Hive Utilities.
- */
-public final class HiveUtil {
-    public static final String METASTOREURIS = "hive.metastore.uris";
-    public static final String METASTROE_URI = "hcat.metastore.uri";
-    public static final String NODE = "hcatNode";
-    public static final String METASTORE_UGI = "hive.metastore.execute.setugi";
-
-    private HiveUtil() {
-
-    }
-
-    public static Properties getHiveCredentials(Cluster cluster) {
-        String metaStoreUrl = ClusterHelper.getRegistryEndPoint(cluster);
-        if (StringUtils.isBlank(metaStoreUrl)) {
-            throw new IllegalStateException(
-                    "Registry interface is not defined in cluster: " + cluster.getName());
-        }
-
-        Properties hiveCredentials = new Properties();
-        hiveCredentials.put(METASTOREURIS, metaStoreUrl);
-        hiveCredentials.put(METASTORE_UGI, "true");
-        hiveCredentials.put(NODE, metaStoreUrl.replace("thrift", "hcat"));
-        hiveCredentials.put(METASTROE_URI, metaStoreUrl);
-
-        if (SecurityUtil.isSecurityEnabled()) {
-            String principal = ClusterHelper
-                    .getPropertyValue(cluster, SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
-            hiveCredentials.put(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL, principal);
-            hiveCredentials.put(SecurityUtil.METASTORE_PRINCIPAL, principal);
-            hiveCredentials.put(SecurityUtil.METASTORE_USE_THRIFT_SASL, "true");
-        }
-        return hiveCredentials;
-    }
-}
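
A minimal sketch of the HiveUtil helper removed above, assuming a cluster entity whose registry interface points at a Thrift metastore:

    import java.util.Properties;
    import org.apache.falcon.entity.HiveUtil;
    import org.apache.falcon.entity.v0.cluster.Cluster;

    public final class HiveCredentialsSketch {
        // Derive Hive/HCatalog connection properties from the cluster's registry endpoint.
        static Properties hiveProps(Cluster cluster) {
            Properties props = HiveUtil.getHiveCredentials(cluster);
            System.out.println("metastore uris: " + props.getProperty(HiveUtil.METASTOREURIS));
            return props;
        }
    }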


[17/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/RecipeMerlin.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/RecipeMerlin.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/RecipeMerlin.java
deleted file mode 100644
index 9b9cff2..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/RecipeMerlin.java
+++ /dev/null
@@ -1,366 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.Entities;
-
-import org.apache.commons.configuration.AbstractFileConfiguration;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.commons.configuration.PropertiesConfiguration;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.FalseFileFilter;
-import org.apache.commons.io.filefilter.RegexFileFilter;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-/** Class for representing a falcon recipe. */
-public final class RecipeMerlin {
-    private static final Logger LOGGER = Logger.getLogger(RecipeMerlin.class);
-    private static final String WORKFLOW_PATH_KEY = "falcon.recipe.workflow.path";
-    private static final String RECIPE_NAME_KEY = "falcon.recipe.name";
-    private static final String WRITE_DIR =
-        Config.getProperty("recipe.location", "/tmp/falcon-recipe");
-
-    private String template;
-    private AbstractFileConfiguration properties;
-    private String workflow;
-    private ClusterMerlin recipeCluster;
-    private ClusterMerlin srcCluster;
-    private ClusterMerlin tgtCluster;
-
-
-    public ClusterMerlin getRecipeCluster() {
-        return recipeCluster;
-    }
-
-    public ClusterMerlin getSrcCluster() {
-        return srcCluster;
-    }
-
-    public ClusterMerlin getTgtCluster() {
-        return tgtCluster;
-    }
-
-    public FalconCLI.RecipeOperation getRecipeOperation() {
-        return recipeOperation;
-    }
-
-    private FalconCLI.RecipeOperation recipeOperation;
-
-    private RecipeMerlin() {
-    }
-
-    public String getName() {
-        return properties.getString(RECIPE_NAME_KEY);
-    }
-
-    public void setUniqueName(String prefix) {
-        properties.setProperty(RECIPE_NAME_KEY, prefix + UUID.randomUUID().toString().split("-")[0]);
-    }
-
-    public String getSourceDir() {
-        return properties.getString("drSourceDir");
-    }
-
-    public RecipeMerlin withSourceDir(final String srcDir) {
-        properties.setProperty("drSourceDir", srcDir);
-        return this;
-    }
-
-    public String getTargetDir() {
-        return properties.getString("drTargetDir");
-    }
-
-    public RecipeMerlin withTargetDir(final String tgtDir) {
-        properties.setProperty("drTargetDir", tgtDir);
-        return this;
-    }
-
-    public String getSourceDb() {
-        return StringUtils.join(properties.getStringArray("sourceDatabase"), ',');
-    }
-
-    public RecipeMerlin withSourceDb(final String srcDatabase) {
-        properties.setProperty("sourceDatabase", srcDatabase);
-        return this;
-    }
-
-    public String getSourceTable() {
-        return StringUtils.join(properties.getStringArray("sourceTable"), ',');
-    }
-
-    public RecipeMerlin withSourceTable(final String tgtTable) {
-        properties.setProperty("sourceTable", tgtTable);
-        return this;
-    }
-
-    public RecipeMerlin withSourceCluster(ClusterMerlin sourceCluster) {
-        this.srcCluster = sourceCluster;
-        if (recipeOperation == FalconCLI.RecipeOperation.HDFS_REPLICATION) {
-            properties.setProperty("drSourceClusterFS", sourceCluster.getInterfaceEndpoint(Interfacetype.WRITE));
-        } else {
-            properties.setProperty("sourceCluster", sourceCluster.getName());
-            properties.setProperty("sourceMetastoreUri", sourceCluster.getProperty("hive.metastore.uris"));
-            properties.setProperty("sourceHiveServer2Uri", sourceCluster.getProperty("hive.server2.uri"));
-            //properties.setProperty("sourceServicePrincipal",
-            //    sourceCluster.getProperty("hive.metastore.kerberos.principal"));
-            properties.setProperty("sourceStagingPath", sourceCluster.getLocation("staging"));
-            properties.setProperty("sourceNN", sourceCluster.getInterfaceEndpoint(Interfacetype.WRITE));
-            properties.setProperty("sourceRM", sourceCluster.getInterfaceEndpoint(Interfacetype.EXECUTE));
-        }
-        return this;
-    }
-
-    public RecipeMerlin withTargetCluster(ClusterMerlin targetCluster) {
-        this.tgtCluster = targetCluster;
-        if (recipeOperation == FalconCLI.RecipeOperation.HDFS_REPLICATION) {
-            properties.setProperty("drTargetClusterFS", targetCluster.getInterfaceEndpoint(Interfacetype.WRITE));
-        } else {
-            properties.setProperty("targetCluster", targetCluster.getName());
-            properties.setProperty("targetMetastoreUri", targetCluster.getProperty("hive.metastore.uris"));
-            properties.setProperty("targetHiveServer2Uri", targetCluster.getProperty("hive.server2.uri"));
-            //properties.setProperty("targetServicePrincipal",
-            //    targetCluster.getProperty("hive.metastore.kerberos.principal"));
-            properties.setProperty("targetStagingPath", targetCluster.getLocation("staging"));
-            properties.setProperty("targetNN", targetCluster.getInterfaceEndpoint(Interfacetype.WRITE));
-            properties.setProperty("targetRM", targetCluster.getInterfaceEndpoint(Interfacetype.EXECUTE));
-        }
-        return this;
-    }
-
-    public RecipeMerlin withRecipeCluster(ClusterMerlin paramRecipeCluster) {
-        this.recipeCluster = paramRecipeCluster;
-        properties.setProperty("falcon.recipe.cluster.name", paramRecipeCluster.getName());
-        properties.setProperty("falcon.recipe.cluster.hdfs.writeEndPoint",
-            paramRecipeCluster.getInterfaceEndpoint(Interfacetype.WRITE));
-        return this;
-    }
-
-    public RecipeMerlin withValidity(final String start, final String end) {
-        properties.setProperty("falcon.recipe.cluster.validity.start", start);
-        properties.setProperty("falcon.recipe.cluster.validity.end", end);
-        return this;
-    }
-
-    public String getValidityStart() {
-        return properties.getString("falcon.recipe.cluster.validity.start");
-    }
-
-    public String getValidityEnd() {
-        return properties.getString("falcon.recipe.cluster.validity.end");
-    }
-
-    public RecipeMerlin withFrequency(final Frequency frequency) {
-        properties.setProperty("falcon.recipe.process.frequency", frequency.toString());
-        return this;
-    }
-
-    public Frequency getFrequency() {
-        return Frequency.fromString(properties.getString("falcon.recipe.process.frequency"));
-    }
-
-    public String getMaxEvents() {
-        return properties.getString("maxEvents");
-    }
-
-    public String getReplicationMaxMaps() {
-        return properties.getString("replicationMaxMaps");
-    }
-
-    public String getDistCpMaxMaps() {
-        return properties.getString("distcpMaxMaps");
-    }
-
-    public String getMapBandwidth() {
-        return properties.getString("distcpMapBandwidth");
-    }
-
-    public Retry getRetry() {
-        final int retryAttempts = properties.getInt("falcon.recipe.retry.attempts");
-        final String retryDelay = properties.getString("falcon.recipe.retry.delay");
-        final String retryPolicy = properties.getString("falcon.recipe.retry.policy");
-
-        Retry retry = new Retry();
-        retry.setAttempts(retryAttempts);
-        retry.setDelay(Frequency.fromString(retryDelay));
-        retry.setPolicy(PolicyType.fromValue(retryPolicy));
-        return retry;
-    }
-
-    public ACL getAcl() {
-        ACL acl = new ACL();
-        acl.setOwner(properties.getString("falcon.recipe.acl.owner"));
-        acl.setGroup(properties.getString("falcon.recipe.acl.group"));
-        acl.setPermission(properties.getString("falcon.recipe.acl.permission"));
-        return acl;
-    }
-
-
-    /**
-     * Reads a recipe from a given directory. Expects the recipe to follow these conventions:
-     * <br> 1. the properties file has a .properties extension
-     * <br> 2. the template file name ends with -template.xml
-     * <br> 3. the workflow file name ends with -workflow.xml
-     * @param readPath the location from where recipe will be read
-     * @param recipeOperation operation of this recipe
-     */
-    public static RecipeMerlin readFromDir(final String readPath,
-                                           FalconCLI.RecipeOperation recipeOperation) {
-        Assert.assertTrue(StringUtils.isNotEmpty(readPath), "readPath for recipe can't be empty");
-        Assert.assertNotNull(recipeOperation, "recipe operation can't be null");
-        RecipeMerlin instance = new RecipeMerlin();
-        instance.recipeOperation = recipeOperation;
-        LOGGER.info("Loading recipe from directory: " + readPath);
-        File directory = null;
-        try {
-            directory = new File(RecipeMerlin.class.getResource("/" + readPath).toURI());
-        } catch (URISyntaxException e) {
-            Assert.fail("could not find dir: " + readPath);
-        }
-        final Collection<File> propertiesFiles = FileUtils.listFiles(directory,
-            new RegexFileFilter(".*\\.properties"), FalseFileFilter.INSTANCE);
-        Assert.assertEquals(propertiesFiles.size(), 1,
-            "Expecting only one property file at: " + readPath +" found: " + propertiesFiles);
-        try {
-            instance.properties =
-                new PropertiesConfiguration(propertiesFiles.iterator().next());
-        } catch (ConfigurationException e) {
-            Assert.fail("Couldn't read recipe's properties file because of exception: "
-                + ExceptionUtils.getStackTrace(e));
-        }
-        instance.properties.setFileName(null); //prevent accidental overwrite of template
-        // remove defaults - specific tests need to supply these
-        instance.properties.clearProperty("sourceDatabase");
-        instance.properties.clearProperty("sourceTable");
-        instance.properties.clearProperty("targetDatabase");
-        instance.properties.clearProperty("targetTable");
-        instance.properties.setProperty("falcon.recipe.acl.owner", MerlinConstants.CURRENT_USER_NAME);
-        instance.properties.setProperty("falcon.recipe.acl.group", MerlinConstants.CURRENT_USER_GROUP);
-        instance.properties.setProperty("falcon.recipe.acl.permission", "*");
-
-        final Collection<File> templatesFiles = FileUtils.listFiles(directory,
-            new RegexFileFilter(".*-template\\.xml"), FalseFileFilter.INSTANCE);
-        Assert.assertEquals(templatesFiles.size(), 1,
-            "Expecting only one template file at: " + readPath + " found: " + templatesFiles);
-        try {
-            instance.template =
-                FileUtils.readFileToString(templatesFiles.iterator().next());
-        } catch (IOException e) {
-            Assert.fail("Couldn't read recipe's template file because of exception: "
-                + ExceptionUtils.getStackTrace(e));
-        }
-
-        final Collection<File> workflowFiles = FileUtils.listFiles(directory,
-            new RegexFileFilter(".*-workflow\\.xml"), FalseFileFilter.INSTANCE);
-        Assert.assertEquals(workflowFiles.size(), 1,
-            "Expecting only one workflow file at: " + readPath + " found: " + workflowFiles);
-        try {
-            instance.workflow = FileUtils.readFileToString(workflowFiles.iterator().next());
-        } catch (IOException e) {
-            Assert.fail("Couldn't read recipe's workflow file because of exception: "
-                + ExceptionUtils.getStackTrace(e));
-        }
-        return instance;
-    }
-
-    /**
-     * Write recipe.
-     */
-    private void write() {
-        final String templateFileLocation = OSUtil.concat(WRITE_DIR, getName() + "-template.xml");
-        try {
-            Assert.assertNotNull(templateFileLocation,
-                "Write location for template file is unexpectedly null.");
-            FileUtils.writeStringToFile(new File(templateFileLocation), template);
-        } catch (IOException e) {
-            Assert.fail("Couldn't write recipe's template file because of exception: "
-                + ExceptionUtils.getStackTrace(e));
-        }
-
-        final String workflowFileLocation = OSUtil.concat(WRITE_DIR, getName() + "-workflow.xml");
-        try {
-            Assert.assertNotNull(workflowFileLocation,
-                "Write location for workflow file is unexpectedly null.");
-            FileUtils.writeStringToFile(new File(workflowFileLocation), workflow);
-        } catch (IOException e) {
-            Assert.fail("Couldn't write recipe's workflow file because of exception: "
-                + ExceptionUtils.getStackTrace(e));
-        }
-        properties.setProperty(WORKFLOW_PATH_KEY, workflowFileLocation);
-        properties.setProperty("falcon.recipe.workflow.name", getName() + "-workflow");
-
-        final String propFileLocation = OSUtil.concat(WRITE_DIR, getName() + ".properties");
-        try {
-            Assert.assertNotNull(propFileLocation,
-                "Write location for properties file is unexpectedly null.");
-            properties.save(new File(propFileLocation));
-        } catch (ConfigurationException e) {
-            Assert.fail("Couldn't write recipe's process file because of exception: "
-                + ExceptionUtils.getStackTrace(e));
-        }
-    }
-
-    /**
-     * Get submission command.
-     */
-    public List<String> getSubmissionCommand() {
-        write();
-        final List<String> cmd = new ArrayList<>();
-        Collections.addAll(cmd, "recipe", "-name", getName(),
-            "-operation", recipeOperation.toString());
-        return cmd;
-    }
-
-    /**
-     * Get tags for recipe.
-     */
-    public List<String> getTags() {
-        final String tagsStr = properties.getString("falcon.recipe.tags");
-        if (StringUtils.isEmpty(tagsStr)) {
-            return new ArrayList<>();
-        }
-        return Arrays.asList(tagsStr.split(","));
-    }
-
-    /**
-     * Set tags for recipe.
-     */
-    public void setTags(List<String> tags) {
-        properties.setProperty("falcon.recipe.tags", StringUtils.join(tags, ','));
-    }
-}
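
A sketch of the fluent RecipeMerlin API removed above; the "hdfs-replication" resource directory and the directories/dates are placeholders, not values taken from the commit:

    import java.util.List;
    import org.apache.falcon.cli.FalconCLI;
    import org.apache.falcon.regression.Entities.RecipeMerlin;

    public final class RecipeSketch {
        // Load an HDFS replication recipe from a classpath directory and prepare its CLI submission.
        static List<String> buildRecipeCommand() {
            RecipeMerlin recipe = RecipeMerlin.readFromDir("hdfs-replication",
                    FalconCLI.RecipeOperation.HDFS_REPLICATION)
                .withSourceDir("/tmp/falcon-regression/source")
                .withTargetDir("/tmp/falcon-regression/target")
                .withValidity("2016-03-01T00:00Z", "2016-03-02T00:00Z");
            recipe.setUniqueName("recipe-sketch-");
            return recipe.getSubmissionCommand();
        }
    }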

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/TestEntityUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/TestEntityUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/TestEntityUtil.java
deleted file mode 100644
index a9a8f00..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/TestEntityUtil.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.Entities;
-
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-import javax.xml.bind.Unmarshaller;
-import java.io.StringReader;
-import java.util.UUID;
-
-/**
- * Util class for merlin entities.
- */
-final class TestEntityUtil {
-
-    private static final Logger LOGGER = Logger.getLogger(TestEntityUtil.class);
-    private TestEntityUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    public static Entity fromString(EntityType type, String str) {
-        try {
-            Unmarshaller unmarshaller = type.getUnmarshaller();
-            unmarshaller.setSchema(null);
-            return (Entity) unmarshaller.unmarshal(new StringReader(str));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /*
-    Generates a unique name; when IS_DEPRECATE is set, uses a short hashed form of the prefix instead.
-    Useful when Oozie uses an embedded database (Derby).
-     */
-    public static String generateUniqueName(String prefix, String oldName) {
-        Assert.assertNotNull(prefix, "name prefix shouldn't be null!");
-        String randomPart = UUID.randomUUID().toString().split("-")[0];
-        if (MerlinConstants.IS_DEPRECATE) {
-            return  'A' + Integer.toHexString(prefix.hashCode()) + '-' + randomPart;
-        } else {
-            return prefix + '-' + oldName + '-' + randomPart;
-        }
-    }
-}
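
And a tiny sketch for the package-private TestEntityUtil above; because the class is not public, the sketch has to live in the same package:

    package org.apache.falcon.regression.Entities;

    final class UniqueNameSketch {
        // Produces something like "clicks-feed-old-name-3f2a1b0c", or a short hashed form
        // of the prefix when MerlinConstants.IS_DEPRECATE is set.
        static String uniqueFeedName() {
            return TestEntityUtil.generateUniqueName("clicks-feed", "old-name");
        }
    }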

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/bundle/Bundle.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/bundle/Bundle.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/bundle/Bundle.java
deleted file mode 100644
index 9758d12..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/bundle/Bundle.java
+++ /dev/null
@@ -1,927 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.bundle;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.apache.falcon.entity.v0.process.LateProcess;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bundle abstraction.
- */
-public class Bundle {
-
-    private static final String PRISM_PREFIX = "prism";
-    private static ColoHelper prismHelper = new ColoHelper(PRISM_PREFIX);
-    private static final Logger LOGGER = Logger.getLogger(Bundle.class);
-
-    private List<String> clusters;
-    private List<String> dataSets;
-    private String processData;
-
-    public void submitFeed()
-        throws URISyntaxException, IOException, AuthenticationException, JAXBException,
-        InterruptedException {
-        submitClusters(prismHelper);
-
-        AssertUtil.assertSucceeded(prismHelper.getFeedHelper().submitEntity(dataSets.get(0)));
-    }
-
-    public void submitAndScheduleFeed() throws Exception {
-        submitClusters(prismHelper);
-
-        AssertUtil.assertSucceeded(prismHelper.getFeedHelper().submitAndSchedule(dataSets.get(0)));
-    }
-
-    public void submitAndScheduleFeedUsingColoHelper(ColoHelper coloHelper) throws Exception {
-        submitFeed();
-
-        AssertUtil.assertSucceeded(coloHelper.getFeedHelper().schedule(dataSets.get(0)));
-    }
-
-    public void submitAndScheduleAllFeeds()
-        throws JAXBException, IOException, URISyntaxException, AuthenticationException,
-        InterruptedException {
-        submitClusters(prismHelper);
-
-        for (String feed : dataSets) {
-            AssertUtil.assertSucceeded(prismHelper.getFeedHelper().submitAndSchedule(feed));
-        }
-    }
-
-    public ServiceResponse submitProcess(boolean shouldSucceed) throws JAXBException,
-        IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        submitClusters(prismHelper);
-        submitFeeds(prismHelper);
-        ServiceResponse r = prismHelper.getProcessHelper().submitEntity(processData);
-        if (shouldSucceed) {
-            AssertUtil.assertSucceeded(r);
-        } else {
-            AssertUtil.assertFailed(r);
-        }
-        return r;
-    }
-
-    public void submitFeedsScheduleProcess() throws Exception {
-        submitClusters(prismHelper);
-
-        submitFeeds(prismHelper);
-
-        AssertUtil.assertSucceeded(prismHelper.getProcessHelper().submitAndSchedule(processData));
-    }
-
-
-    public void submitAndScheduleProcess() throws Exception {
-        submitAndScheduleAllFeeds();
-
-        AssertUtil.assertSucceeded(prismHelper.getProcessHelper().submitAndSchedule(processData));
-    }
-
-    public void submitAndScheduleProcessUsingColoHelper(ColoHelper coloHelper) throws Exception {
-        submitProcess(true);
-
-        AssertUtil.assertSucceeded(coloHelper.getProcessHelper().schedule(processData));
-    }
-
-    public List<String> getClusters() {
-        return clusters;
-    }
-
-    public Bundle(String clusterData, List<String> dataSets, String processData) {
-        this.dataSets = dataSets;
-        this.processData = processData;
-        this.clusters = new ArrayList<>();
-        this.clusters.add(clusterData);
-    }
-
-    public Bundle(Bundle bundle, String prefix) {
-        this.dataSets = new ArrayList<>(bundle.getDataSets());
-        this.processData = bundle.getProcessData();
-        this.clusters = new ArrayList<>();
-        for (String cluster : bundle.getClusters()) {
-            this.clusters.add(BundleUtil.getEnvClusterXML(cluster, prefix).toString());
-        }
-    }
-
-    public Bundle(Bundle bundle, ColoHelper helper) {
-        this(bundle, helper.getPrefix());
-    }
-
-    public void setClusterData(List<String> pClusters) {
-        this.clusters = new ArrayList<>(pClusters);
-    }
-    /**
-     * Serializes the given cluster element to a string and writes it into the bundle.
-     *
-     * @param c      Cluster object to be serialized and set as the bundle's only cluster
-     */
-    public void writeClusterElement(org.apache.falcon.entity.v0.cluster.Cluster c) {
-        final List<String> newClusters = new ArrayList<>();
-        newClusters.add(c.toString());
-        setClusterData(newClusters);
-    }
-
-    /**
-     * Wraps the bundle's first cluster definition in a ClusterMerlin object.
-     *
-     * @return cluster definition in the form of a ClusterMerlin object
-     */
-    public ClusterMerlin getClusterElement() {
-        return new ClusterMerlin(getClusters().get(0));
-    }
-
-
-    public List<String> getClusterNames() {
-        List<String> clusterNames = new ArrayList<>();
-        for (String cluster : clusters) {
-            clusterNames.add(new ClusterMerlin(cluster).getName());
-        }
-        return clusterNames;
-    }
-
-    public List<String> getDataSets() {
-        return dataSets;
-    }
-
-    public void setDataSets(List<String> dataSets) {
-        this.dataSets = dataSets;
-    }
-
-    public String getProcessData() {
-        return processData;
-    }
-
-    public void setProcessData(String processData) {
-        this.processData = processData;
-    }
-
-    /**
-     * Generates unique entities within a bundle by changing their names and the names of
-     * dependent items.
-     */
-    public void generateUniqueBundle(Object testClassObject) {
-        generateUniqueBundle(testClassObject.getClass().getSimpleName());
-    }
-
-    /**
-     * Generates unique entities within a bundle by changing their names and the names of
-     * dependent items.
-     */
-    public void generateUniqueBundle(String prefix) {
-        /* creating new names */
-        List<ClusterMerlin> clusterMerlinList = BundleUtil.getClustersFromStrings(clusters);
-        Map<String, String> clusterNameMap = new HashMap<>();
-        for (ClusterMerlin clusterMerlin : clusterMerlinList) {
-            clusterNameMap.putAll(clusterMerlin.setUniqueName(prefix));
-        }
-
-        List<FeedMerlin> feedMerlinList = FeedMerlin.fromString(dataSets);
-        Map<String, String> feedNameMap = new HashMap<>();
-        for (FeedMerlin feedMerlin : feedMerlinList) {
-            feedNameMap.putAll(feedMerlin.setUniqueName(prefix));
-        }
-
-        /* setting new names in feeds and process */
-        for (FeedMerlin feedMerlin : feedMerlinList) {
-            feedMerlin.renameClusters(clusterNameMap);
-        }
-
-        /* setting variables */
-        clusters.clear();
-        for (ClusterMerlin clusterMerlin : clusterMerlinList) {
-            clusters.add(clusterMerlin.toString());
-        }
-        dataSets.clear();
-        for (FeedMerlin feedMerlin : feedMerlinList) {
-            dataSets.add(feedMerlin.toString());
-        }
-
-        if (StringUtils.isNotEmpty(processData)) {
-            ProcessMerlin processMerlin = new ProcessMerlin(processData);
-            processMerlin.setUniqueName(prefix);
-            processMerlin.renameClusters(clusterNameMap);
-            processMerlin.renameFeeds(feedNameMap);
-            processData = processMerlin.toString();
-        }
-    }
-
-    public ServiceResponse submitBundle(ColoHelper helper)
-        throws JAXBException, IOException, URISyntaxException, AuthenticationException,
-        InterruptedException {
-
-        submitClusters(helper);
-
-        //let's submit all the feeds first
-        submitFeeds(helper);
-
-        return helper.getProcessHelper().submitEntity(getProcessData());
-    }
-
-    /**
-     * Submit all the entities and schedule the process.
-     *
-     * @param helper helper of prism host
-     * @return schedule response, or the submit response if entity submission fails
-     * @throws IOException
-     * @throws JAXBException
-     * @throws URISyntaxException
-     * @throws AuthenticationException
-     */
-    public ServiceResponse submitFeedsScheduleProcess(ColoHelper helper)
-        throws IOException, JAXBException, URISyntaxException,
-        AuthenticationException, InterruptedException {
-        ServiceResponse submitResponse = submitBundle(helper);
-        if (submitResponse.getCode() == 400) {
-            return submitResponse;
-        }
-
-        //let's schedule the process now
-        ServiceResponse scheduleResult = helper.getProcessHelper().schedule(getProcessData());
-        AssertUtil.assertSucceeded(scheduleResult);
-        TimeUtil.sleepSeconds(7);
-        return scheduleResult;
-    }
-
-    /**
-     * Sets the only process input.
-     *
-     * @param startEl start of the input as an EL expression
-     * @param endEl end of the input as an EL expression
-     */
-    public void setProcessInput(String startEl, String endEl) {
-        ProcessMerlin process = getProcessObject();
-        process.setInputFeedWithEl(Util.readEntityName(getInputFeedFromBundle()), startEl, endEl);
-        this.setProcessData(process.toString());
-    }
-
-    public void setInvalidData() {
-        FeedMerlin dataElement = new FeedMerlin(getInputFeedFromBundle());
-        String oldLocation = dataElement.getLocations().getLocations().get(0).getPath();
-        LOGGER.info("oldlocation: " + oldLocation);
-        dataElement.getLocations().getLocations().get(0).setPath(
-            oldLocation.substring(0, oldLocation.indexOf('$')) + "invalid/"
-                    + oldLocation.substring(oldLocation.indexOf('$')));
-        LOGGER.info("new location: " + dataElement.getLocations().getLocations().get(0).getPath());
-        setInputFeed(dataElement.toString());
-    }
-
-    public void setInputFeed(String newFeed) {
-        String inputFeedName = getInputFeedNameFromBundle();
-        for (int i = 0; i < dataSets.size(); i++) {
-            if (new FeedMerlin(dataSets.get(i)).getName().equals(inputFeedName)) {
-                dataSets.set(i, newFeed);
-                return;
-            }
-        }
-    }
-
-    public void setFeedValidity(String feedStart, String feedEnd, String feedName) {
-        FeedMerlin feedElement = getFeedElement(feedName);
-        feedElement.setValidity(feedStart, feedEnd);
-        writeFeedElement(feedElement, feedName);
-    }
-
-    public int getInitialDatasetFrequency() {
-        FeedMerlin dataElement = new FeedMerlin(getInputFeedFromBundle());
-        if (dataElement.getFrequency().getTimeUnit() == TimeUnit.hours) {
-            return (Integer.parseInt(dataElement.getFrequency().getFrequency())) * 60;
-        } else {
-            return (Integer.parseInt(dataElement.getFrequency().getFrequency()));
-        }
-    }
-
-    public Date getStartInstanceProcess(Calendar time) {
-        ProcessMerlin processElement = getProcessObject();
-        LOGGER.info("start instance: " + processElement.getInputs().getInputs().get(0).getStart());
-        return TimeUtil.getMinutes(processElement.getInputs().getInputs().get(0).getStart(), time);
-    }
-
-    public Date getEndInstanceProcess(Calendar time) {
-        ProcessMerlin processElement = getProcessObject();
-        LOGGER.info("end instance: " + processElement.getInputs().getInputs().get(0).getEnd());
-        LOGGER.info("timezone in getendinstance: " + time.getTimeZone().toString());
-        LOGGER.info("time in getendinstance: " + time.getTime());
-        return TimeUtil.getMinutes(processElement.getInputs().getInputs().get(0).getEnd(), time);
-    }
-
-    public void setDatasetInstances(String startInstance, String endInstance) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setDatasetInstances(startInstance, endInstance);
-        setProcessData(processElement.toString());
-    }
-
-    public void setProcessPeriodicity(int frequency, TimeUnit periodicity) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setPeriodicity(frequency, periodicity);
-        setProcessData(processElement.toString());
-    }
-
-    public void setProcessInputStartEnd(String start, String end) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setProcessInputStartEnd(start, end);
-        setProcessData(processElement.toString());
-    }
-
-    public void setOutputFeedPeriodicity(int frequency, TimeUnit periodicity) {
-        ProcessMerlin processElement = new ProcessMerlin(processData);
-        String outputDataset = null;
-        int datasetIndex;
-        for (datasetIndex = 0; datasetIndex < dataSets.size(); datasetIndex++) {
-            outputDataset = dataSets.get(datasetIndex);
-            if (outputDataset.contains(processElement.getOutputs().getOutputs().get(0).getFeed())) {
-                break;
-            }
-        }
-
-        FeedMerlin feedElement = new FeedMerlin(outputDataset);
-
-        feedElement.setFrequency(new Frequency("" + frequency, periodicity));
-        dataSets.set(datasetIndex, feedElement.toString());
-        LOGGER.info("modified o/p dataSet is: " + dataSets.get(datasetIndex));
-    }
-
-    public int getProcessConcurrency() {
-        return getProcessObject().getParallel();
-    }
-
-    public void setOutputFeedLocationData(String path) {
-        FeedMerlin feedElement = getFeedElement(getOutputFeedNameFromBundle());
-        feedElement.setDataLocationPath(path);
-        writeFeedElement(feedElement, feedElement.getName());
-        LOGGER.info("modified location path dataSet is: " + feedElement);
-    }
-
-    public void setProcessConcurrency(int concurrency) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setParallel((concurrency));
-        setProcessData(processElement.toString());
-    }
-
-    public void setProcessWorkflow(String wfPath) {
-        setProcessWorkflow(wfPath, null);
-    }
-
-    public void setProcessWorkflow(String wfPath, EngineType engineType) {
-        setProcessWorkflow(wfPath, null, engineType);
-    }
-
-    public void setProcessWorkflow(String wfPath, String libPath, EngineType engineType) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setWorkflow(wfPath, libPath, engineType);
-        setProcessData(processElement.toString());
-    }
-
-    public ProcessMerlin getProcessObject() {
-        return new ProcessMerlin(getProcessData());
-    }
-
-    public FeedMerlin getFeedElement(String feedName) {
-        return new FeedMerlin(getFeed(feedName));
-    }
-
-
-    public String getFeed(String feedName) {
-        for (String feed : getDataSets()) {
-            if (Util.readEntityName(feed).contains(feedName)) {
-                return feed;
-            }
-        }
-
-        return null;
-    }
-
-
-    public void writeFeedElement(FeedMerlin feedElement, String feedName) {
-        writeFeedElement(feedElement.toString(), feedName);
-    }
-
-
-    public void writeFeedElement(String feedString, String feedName) {
-        dataSets.set(dataSets.indexOf(getFeed(feedName)), feedString);
-    }
-
-
-    public void setInputFeedPeriodicity(int frequency, TimeUnit periodicity) {
-        String feedName = getInputFeedNameFromBundle();
-        FeedMerlin feedElement = getFeedElement(feedName);
-        feedElement.setPeriodicity(frequency, periodicity);
-        writeFeedElement(feedElement, feedName);
-
-    }
-
-    public void setInputFeedValidity(String startInstance, String endInstance) {
-        String feedName = getInputFeedNameFromBundle();
-        this.setFeedValidity(startInstance, endInstance, feedName);
-    }
-
-    public void setOutputFeedValidity(String startInstance, String endInstance) {
-        String feedName = getOutputFeedNameFromBundle();
-        this.setFeedValidity(startInstance, endInstance, feedName);
-    }
-
-    public void setInputFeedDataPath(String path) {
-        String feedName = getInputFeedNameFromBundle();
-        FeedMerlin feedElement = getFeedElement(feedName);
-        feedElement.setDataLocationPath(path);
-        writeFeedElement(feedElement, feedName);
-    }
-
-    public String getFeedDataPathPrefix() {
-        FeedMerlin feedElement =
-            getFeedElement(getInputFeedNameFromBundle());
-        return Util.getPathPrefix(feedElement.getLocations().getLocations().get(0)
-            .getPath());
-    }
-
-    public void setProcessValidity(String startDate, String endDate) {
-        ProcessMerlin processElement = new ProcessMerlin(processData);
-        processElement.setValidity(startDate, endDate);
-        processData = processElement.toString();
-    }
-
-    public void setProcessLatePolicy(LateProcess lateProcess) {
-        ProcessMerlin processElement = new ProcessMerlin(processData);
-        processElement.setLateProcess(lateProcess);
-        processData = processElement.toString();
-    }
-
-
-    public void verifyDependencyListing(ColoHelper coloHelper)
-        throws InterruptedException, IOException, AuthenticationException, URISyntaxException {
-        //display dependencies of process:
-        String dependencies = coloHelper.getProcessHelper().getDependencies(
-            Util.readEntityName(getProcessData())).getEntityList().toString();
-
-        //verify presence
-        for (String cluster : clusters) {
-            Assert.assertTrue(dependencies.contains("(cluster) " + Util.readEntityName(cluster)));
-        }
-        for (String feed : getDataSets()) {
-            Assert.assertTrue(dependencies.contains("(feed) " + Util.readEntityName(feed)));
-            for (String cluster : clusters) {
-                Assert.assertTrue(coloHelper.getFeedHelper().getDependencies(
-                    Util.readEntityName(feed)).getEntityList().toString()
-                    .contains("(cluster) " + Util.readEntityName(cluster)));
-            }
-            Assert.assertFalse(coloHelper.getFeedHelper().getDependencies(
-                Util.readEntityName(feed)).getEntityList().toString()
-                .contains("(process)" + Util.readEntityName(getProcessData())));
-        }
-    }
-
-    public void addProcessInput(String inputName, String feedName) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.addInputFeed(inputName, feedName);
-        setProcessData(processElement.toString());
-    }
-
-    public void setProcessName(String newName) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setName(newName);
-        setProcessData(processElement.toString());
-
-    }
-
-    public void setRetry(Retry retry) {
-        LOGGER.info("old process: " + Util.prettyPrintXml(processData));
-        ProcessMerlin processObject = getProcessObject();
-        processObject.setRetry(retry);
-        processData = processObject.toString();
-        LOGGER.info("updated process: " + Util.prettyPrintXml(processData));
-    }
-
-    public void setInputFeedAvailabilityFlag(String flag) {
-        String feedName = getInputFeedNameFromBundle();
-        FeedMerlin feedElement = getFeedElement(feedName);
-        feedElement.setAvailabilityFlag(flag);
-        writeFeedElement(feedElement, feedName);
-    }
-
-    public void setOutputFeedAvailabilityFlag(String flag) {
-        String feedName = getOutputFeedNameFromBundle();
-        FeedMerlin feedElement = getFeedElement(feedName);
-        feedElement.setAvailabilityFlag(flag);
-        writeFeedElement(feedElement, feedName);
-    }
-
-    public void setCLusterColo(String colo) {
-        ClusterMerlin c = getClusterElement();
-        c.setColo(colo);
-        writeClusterElement(c);
-
-    }
-
-    public void setClusterInterface(Interfacetype interfacetype, String value) {
-        ClusterMerlin c = getClusterElement();
-        c.setInterface(interfacetype, value, null);
-        writeClusterElement(c);
-    }
-
-    public void setInputFeedTableUri(String tableUri) {
-        final String feedStr = getInputFeedFromBundle();
-        FeedMerlin feed = new FeedMerlin(feedStr);
-        feed.setTableUri(tableUri);
-        writeFeedElement(feed, feed.getName());
-    }
-
-    public void setOutputFeedTableUri(String tableUri) {
-        final String feedStr = getOutputFeedFromBundle();
-        FeedMerlin feed = new FeedMerlin(feedStr);
-        feed.setTableUri(tableUri);
-        writeFeedElement(feed, feed.getName());
-    }
-
-    public void setCLusterWorkingPath(String clusterData, String path) {
-        ClusterMerlin c = new ClusterMerlin(clusterData);
-        c.setWorkingLocationPath(path);
-        writeClusterElement(c);
-    }
-
-
-    public void submitClusters(ColoHelper helper)
-        throws JAXBException, IOException, URISyntaxException, AuthenticationException,
-        InterruptedException {
-        submitClusters(helper, null);
-    }
-
-    public void submitClusters(ColoHelper helper, String user)
-        throws JAXBException, IOException, URISyntaxException, AuthenticationException,
-        InterruptedException {
-        for (String cluster : this.clusters) {
-            AssertUtil.assertSucceeded(helper.getClusterHelper().submitEntity(cluster, user));
-        }
-    }
-
-    public void submitFeeds(ColoHelper helper)
-        throws JAXBException, IOException, URISyntaxException, AuthenticationException,
-        InterruptedException {
-        for (String feed : this.dataSets) {
-            AssertUtil.assertSucceeded(helper.getFeedHelper().submitEntity(feed));
-        }
-    }
-
-    public void addClusterToBundle(String clusterData, ClusterType type,
-                                   String startTime, String endTime) {
-        clusterData = setNewClusterName(clusterData);
-
-        this.clusters.add(clusterData);
-        //now to add clusters to feeds
-        for (int i = 0; i < dataSets.size(); i++) {
-            FeedMerlin feedObject = new FeedMerlin(dataSets.get(i));
-            org.apache.falcon.entity.v0.feed.Cluster cluster =
-                new org.apache.falcon.entity.v0.feed.Cluster();
-            cluster.setName(new ClusterMerlin(clusterData).getName());
-            cluster.setValidity(feedObject.getClusters().getClusters().get(0).getValidity());
-            cluster.setType(type);
-            cluster.setRetention(feedObject.getClusters().getClusters().get(0).getRetention());
-            feedObject.getClusters().getClusters().add(cluster);
-
-            dataSets.remove(i);
-            dataSets.add(i, feedObject.toString());
-
-        }
-
-        //now to add cluster to process
-        ProcessMerlin processObject = new ProcessMerlin(processData);
-        Cluster cluster = new Cluster();
-        cluster.setName(new ClusterMerlin(clusterData).getName());
-        org.apache.falcon.entity.v0.process.Validity v =
-            processObject.getClusters().getClusters().get(0).getValidity();
-        if (StringUtils.isNotEmpty(startTime)) {
-            v.setStart(TimeUtil.oozieDateToDate(startTime).toDate());
-        }
-        if (StringUtils.isNotEmpty(endTime)) {
-            v.setEnd(TimeUtil.oozieDateToDate(endTime).toDate());
-        }
-        cluster.setValidity(v);
-        processObject.getClusters().getClusters().add(cluster);
-        this.processData = processObject.toString();
-
-    }
-
-    private String setNewClusterName(String clusterData) {
-        ClusterMerlin clusterObj = new ClusterMerlin(clusterData);
-        clusterObj.setName(clusterObj.getName() + this.clusters.size() + 1);
-        return clusterObj.toString();
-    }
-
-    public void deleteBundle(ColoHelper helper) {
-
-        try {
-            helper.getProcessHelper().delete(getProcessData());
-        } catch (Exception e) {
-            LOGGER.warn("Process deletion failed: " + e.getMessage());
-        }
-
-        for (String dataset : getDataSets()) {
-            try {
-                helper.getFeedHelper().delete(dataset);
-            } catch (Exception e) {
-                LOGGER.warn("Feed deletion failed: " + e.getMessage());
-            }
-        }
-
-        for (String cluster : this.getClusters()) {
-            try {
-                helper.getClusterHelper().delete(cluster);
-            } catch (Exception e) {
-                LOGGER.warn("Cluster deletion failed: " + e.getMessage());
-            }
-        }
-
-
-    }
-
-    public String getProcessName() {
-        return new ProcessMerlin(this.processData).getName();
-    }
-
-    public void setProcessLibPath(String libPath) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.getWorkflow().setLib(libPath);
-        setProcessData(processElement.toString());
-    }
-
-    public void setProcessTimeOut(int magnitude, TimeUnit unit) {
-        ProcessMerlin processElement = getProcessObject();
-        processElement.setTimeOut(magnitude, unit);
-        setProcessData(processElement.toString());
-    }
-
-    public static void submitCluster(Bundle... bundles)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-
-        for (Bundle bundle : bundles) {
-            ServiceResponse r =
-                prismHelper.getClusterHelper().submitEntity(bundle.getClusters().get(0));
-            Assert.assertTrue(r.getMessage().contains("SUCCEEDED"), r.getMessage());
-        }
-
-
-    }
-
-    /**
-     * Generates unique entity definitions: clusters, feeds and a process, populating them with
-     * desired values of different properties.
-     *
-     * @param numberOfClusters number of clusters on which feeds and process should run
-     * @param numberOfInputs number of desired inputs in process definition
-     * @param numberOfOptionalInput how many inputs should be optional
-     * @param inputBasePaths base data path for inputs
-     * @param numberOfOutputs number of outputs
-     * @param startTime start of feeds and process validity on every cluster
-     * @param endTime end of feeds and process validity on every cluster
-     */
-    public void generateRequiredBundle(int numberOfClusters, int numberOfInputs,
-                                       int numberOfOptionalInput,
-                                       String inputBasePaths, int numberOfOutputs, String startTime,
-                                       String endTime) {
-        //generate and set clusters
-        ClusterMerlin c = new ClusterMerlin(getClusters().get(0));
-        List<String> newClusters = new ArrayList<>();
-        final String clusterName = c.getName();
-        for (int i = 0; i < numberOfClusters; i++) {
-            c.setName(clusterName + i);
-            newClusters.add(i, c.toString());
-        }
-        setClusterData(newClusters);
-
-        //generate and set newDataSets
-        List<String> newDataSets = new ArrayList<>();
-        for (int i = 0; i < numberOfInputs; i++) {
-            final FeedMerlin feed = new FeedMerlin(getDataSets().get(0));
-            feed.setName(feed.getName() + "-input" + i);
-            feed.setFeedClusters(newClusters, inputBasePaths + "/input" + i, startTime, endTime);
-            newDataSets.add(feed.toString());
-        }
-        for (int i = 0; i < numberOfOutputs; i++) {
-            final FeedMerlin feed = new FeedMerlin(getDataSets().get(0));
-            feed.setName(feed.getName() + "-output" + i);
-            feed.setFeedClusters(newClusters, inputBasePaths + "/output" + i,  startTime, endTime);
-            newDataSets.add(feed.toString());
-        }
-        setDataSets(newDataSets);
-
-        //add clusters and feed to process
-        ProcessMerlin processMerlin = new ProcessMerlin(getProcessData());
-        processMerlin.setProcessClusters(newClusters, startTime, endTime);
-        processMerlin.setProcessFeeds(newDataSets, numberOfInputs,
-            numberOfOptionalInput, numberOfOutputs);
-        setProcessData(processMerlin.toString());
-    }
-
-    public void submitAndScheduleBundle(ColoHelper helper, boolean checkSuccess)
-        throws IOException, JAXBException, URISyntaxException, AuthenticationException,
-            InterruptedException {
-
-        for (int i = 0; i < getClusters().size(); i++) {
-            ServiceResponse r;
-            r = helper.getClusterHelper().submitEntity(getClusters().get(i));
-            if (checkSuccess) {
-                AssertUtil.assertSucceeded(r);
-            }
-        }
-        for (int i = 0; i < getDataSets().size(); i++) {
-            ServiceResponse r = helper.getFeedHelper().submitAndSchedule(getDataSets().get(i));
-            if (checkSuccess) {
-                AssertUtil.assertSucceeded(r);
-            }
-        }
-        ServiceResponse r = helper.getProcessHelper().submitAndSchedule(getProcessData());
-        if (checkSuccess) {
-            AssertUtil.assertSucceeded(r);
-        }
-    }
-
-    /**
-     * Changes names of process inputs.
-     *
-     * @param names desired names of inputs
-     */
-    public void setProcessInputNames(String... names) {
-        ProcessMerlin p = new ProcessMerlin(processData);
-        p.setInputNames(names);
-        processData = p.toString();
-    }
-
-    /**
-     * Adds a property to the process definition.
-     *
-     * @param propName name of the property to be added
-     * @param propValue value of the property to be added
-     */
-    public void addProcessProperty(String propName, String propValue) {
-        processData = new ProcessMerlin(processData).withProperty(propName, propValue).toString();
-    }
-
-    /**
-     * Sets partition for each input, according to number of supplied partitions.
-     *
-     * @param partition partitions to be set
-     */
-    public void setProcessInputPartition(String... partition) {
-        ProcessMerlin p = new ProcessMerlin(processData);
-        p.setInputPartition(partition);
-        processData = p.toString();
-    }
-
-    /**
-     * Sets name(s) of the process output(s).
-     *
-     * @param names new names of the outputs
-     */
-    public void setProcessOutputNames(String... names) {
-        ProcessMerlin p = new ProcessMerlin(processData);
-        p.setOutputNames(names);
-        processData = p.toString();
-    }
-
-    public void addInputFeedToBundle(String feedRefName, Feed feed) {
-        this.getDataSets().add(feed.toString());
-
-        ProcessMerlin processObject = new ProcessMerlin(processData);
-        processObject.addInputFeed(feedRefName, feed.getName());
-        setProcessData(processObject.toString());
-    }
-
-    public void addOutputFeedToBundle(String feedRefName, Feed feed) {
-        this.getDataSets().add(feed.toString());
-
-        ProcessMerlin processObject = getProcessObject();
-        processObject.addOutputFeed(feedRefName, feed.getName());
-        setProcessData(processObject.toString());
-    }
-
-    public void setProcessProperty(String property, String value) {
-        ProcessMerlin process = getProcessObject().withProperty(property, value);
-        this.setProcessData(process.toString());
-    }
-
-    public String getDatasetPath() {
-        FeedMerlin dataElement = new FeedMerlin(getInputFeedFromBundle());
-        return dataElement.getLocations().getLocations().get(0).getPath();
-    }
-
-    public String getInputFeedFromBundle() {
-        ProcessMerlin processObject = new ProcessMerlin(getProcessData());
-        return getFeed(processObject.getInputs().getInputs().get(0).getFeed());
-    }
-
-    public String getOutputFeedFromBundle() {
-        ProcessMerlin processObject = new ProcessMerlin(getProcessData());
-        return getFeed(processObject.getOutputs().getOutputs().get(0).getFeed());
-    }
-
-    public String getOutputFeedNameFromBundle() {
-        String feedData = getOutputFeedFromBundle();
-        FeedMerlin feedObject = new FeedMerlin(feedData);
-        return feedObject.getName();
-    }
-
-    public String getInputFeedNameFromBundle() {
-        String feedData = getInputFeedFromBundle();
-        FeedMerlin feedObject = new FeedMerlin(feedData);
-        return feedObject.getName();
-    }
-
-    /**
-     * Sets process pipelines.
-     * @param pipelines proposed pipelines
-     */
-    public void setProcessPipeline(String... pipelines){
-        ProcessMerlin process = new ProcessMerlin(getProcessData());
-        process.setPipelineTag(pipelines);
-        setProcessData(process.toString());
-    }
-
-    /**
-     * Set ACL of bundle's cluster.
-     */
-    public void setCLusterACL(String owner, String group, String permission) {
-        ClusterMerlin clusterMerlin = getClusterElement();
-        clusterMerlin.setACL(owner, group, permission);
-        writeClusterElement(clusterMerlin);
-
-    }
-
-    /**
-     * Set ACL of bundle's input feed.
-     */
-    public void setInputFeedACL(String owner, String group, String permission) {
-        String feedName = getInputFeedNameFromBundle();
-        FeedMerlin feedMerlin = getFeedElement(feedName);
-        feedMerlin.setACL(owner, group, permission);
-        writeFeedElement(feedMerlin, feedName);
-    }
-
-    /**
-     * Set ACL of bundle's process.
-     */
-    public void setProcessACL(String owner, String group, String permission) {
-        ProcessMerlin processMerlin = getProcessObject();
-        processMerlin.setACL(owner, group, permission);
-        setProcessData(processMerlin.toString());
-    }
-
-    /**
-     * Set custom tags for a process. Key-value pairs are valid.
-     */
-    public void setProcessTags(String value) {
-        ProcessMerlin processMerlin = getProcessObject();
-        processMerlin.setTags(value);
-        setProcessData(processMerlin.toString());
-    }
-
-
-    public static int runFalconCLI(List<String> args) throws Exception {
-        args.add(1, "-url");
-        args.add(2, prismHelper.getClusterHelper().getHostname());
-        LOGGER.info("Going to run falcon jar with args: " + args);
-        return new FalconCLI().run(args.toArray(new String[]{}));
-    }
-}
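
A minimal sketch of the flow the removed Bundle class supported in regression tests; it assumes a prepared Bundle ("bundle") and a prism-side ColoHelper ("prism") as in the test base classes, and the validity dates are illustrative:

    // Checked exceptions omitted for brevity.
    bundle.generateUniqueBundle("MyTest");                 // make entity names unique
    bundle.setProcessValidity("2016-03-01T00:00Z", "2016-03-02T00:00Z");
    bundle.submitFeedsScheduleProcess(prism);              // submit clusters and feeds, then schedule the process
    bundle.deleteBundle(prism);                            // best-effort cleanup at the end of the test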

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/FreqType.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/FreqType.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/FreqType.java
deleted file mode 100644
index 46c30ba..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/FreqType.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.enumsAndConstants;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.Frequency;
-import org.joda.time.DateTime;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-
-/**
- * Enum to represent different feed periodicity.
- */
-public enum FreqType {
-    MINUTELY("minutely", "yyyy/MM/dd/HH/mm", "${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}",
-            "year=${YEAR};month=${MONTH};day=${DAY};hour=${HOUR};minute=${MINUTE}") {
-        public DateTime addTime(DateTime dateTime, int amount) {
-            return dateTime.plusMinutes(amount);
-        }
-
-        @Override
-        public Frequency.TimeUnit getFalconTimeUnit() {
-            return Frequency.TimeUnit.minutes;
-        }
-    },
-    HOURLY("hourly", "yyyy/MM/dd/HH", "${YEAR}/${MONTH}/${DAY}/${HOUR}",
-            "year=${YEAR};month=${MONTH};day=${DAY};hour=${HOUR}") {
-        @Override
-        public DateTime addTime(DateTime dateTime, int amount) {
-            return dateTime.plusHours(amount);
-        }
-
-        @Override
-        public Frequency.TimeUnit getFalconTimeUnit() {
-            return Frequency.TimeUnit.hours;
-        }
-    },
-    DAILY("daily", "yyyy/MM/dd", "${YEAR}/${MONTH}/${DAY}",
-            "year=${YEAR};month=${MONTH};day=${DAY}") {
-        @Override
-        public DateTime addTime(DateTime dateTime, int amount) {
-            return dateTime.plusDays(amount);
-        }
-
-        @Override
-        public Frequency.TimeUnit getFalconTimeUnit() {
-            return Frequency.TimeUnit.days;
-        }
-    },
-    MONTHLY("monthly", "yyyy/MM", "${YEAR}/${MONTH}",
-            "year=${YEAR};month=${MONTH}") {
-        @Override
-        public DateTime addTime(DateTime dateTime, int amount) {
-            return dateTime.plusMonths(amount);
-        }
-
-        @Override
-        public Frequency.TimeUnit getFalconTimeUnit() {
-            return Frequency.TimeUnit.months;
-        }
-    },
-    YEARLY("yearly", "yyyy", "${YEAR}",
-            "year=${YEAR}") {
-        @Override
-        public DateTime addTime(DateTime dateTime, int amount) {
-            return dateTime.plusYears(amount);
-        }
-
-        @Override
-        public Frequency.TimeUnit getFalconTimeUnit() {
-            throw new UnsupportedOperationException();
-        }
-    };
-
-    private final String value;
-    private final String pathValue;
-    private final String hcatPathValue;
-    private final DateTimeFormatter formatter;
-
-    private FreqType(String value, String format, String pathValue, String hcatPathValue) {
-        this.value = value;
-        formatter = DateTimeFormat.forPattern(format);
-        this.pathValue = pathValue;
-        this.hcatPathValue = hcatPathValue;
-    }
-
-    public String getValue() {
-        return value;
-    }
-
-    public String getPathValue() {
-        return pathValue;
-    }
-
-    public String getHcatPathValue() {
-        return hcatPathValue;
-    }
-
-    public DateTimeFormatter getFormatter() {
-        return formatter;
-    }
-
-    public int getDirDepth() {
-        return StringUtils.countMatches(pathValue, "/");
-    }
-
-    public abstract DateTime addTime(DateTime dateTime, int amount);
-
-    public abstract Frequency.TimeUnit getFalconTimeUnit();
-}
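
A minimal sketch of how the removed FreqType enum maps an instance time to a data path; the instance time is illustrative and Joda-Time is assumed on the classpath:

    DateTime instance = new DateTime(2016, 3, 1, 9, 0);    // illustrative instance time
    String dir = FreqType.HOURLY.getFormatter().print(FreqType.HOURLY.addTime(instance, 1));
    // dir == "2016/03/01/10" -- one hourly step later, rendered in the enum's path format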

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/MerlinConstants.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/MerlinConstants.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/MerlinConstants.java
deleted file mode 100644
index 076f68f..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/MerlinConstants.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.enumsAndConstants;
-
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.hadoop.conf.Configuration;
-import org.testng.Assert;
-import org.apache.log4j.Logger;
-
-import java.util.HashMap;
-
-/**
- * Class for test constants.
- */
-public final class MerlinConstants {
-    private MerlinConstants() {
-    }
-
-    private static final Logger LOGGER = Logger.getLogger(MerlinConstants.class);
-
-    public static final String PRISM_URL = Config.getProperty("prism.hostname");
-
-    public static final String HELP_URL =
-        Config.getProperty("falcon.help.url", "http://falcon.apache.org/");
-
-    public static final boolean IS_SECURE =
-        "kerberos".equals(new Configuration().get("hadoop.security.authentication", "simple"));
-
-    /** Staging location to use in cluster xml. */
-    public static final String STAGING_LOCATION = Config.getProperty("merlin.staging.location",
-        "/tmp/falcon-regression-staging");
-    /** Working location to use in cluster xml. */
-    public static final String WORKING_LOCATION = Config.getProperty("merlin.working.location",
-        "/tmp/falcon-regression-working");
-    public static final String TEMP_LOCATION = Config.getProperty("merlin.temp.location", "/tmp");
-
-    public static final String OOZIE_EXAMPLE_LIB = Config.getProperty("merlin.oozie_example_lib",
-            "https://repo1.maven.org/maven2/org/apache/oozie/oozie-examples/4.1.0/oozie-examples-4.1.0.jar");
-
-    /** the user that is going to run tests. */
-    public static final String CURRENT_USER_NAME = Config.getProperty("current_user_name",
-        System.getProperty("user.name"));
-    /** keytab of current user. */
-    private static final String CURRENT_USER_KEYTAB_STR = "current_user_keytab";
-    /** group of the current user. */
-    public static final String CURRENT_USER_GROUP =
-        Config.getProperty("current_user.group.name", "users");
-
-    /** a user that does not belong to the group of current user. */
-    public static final String DIFFERENT_USER_NAME = Config.getProperty("other.user.name", "root");
-
-    /** group of the user that does not belong to the group of the current user. */
-    public static final String DIFFERENT_USER_GROUP = Config.getProperty("other.user.group", "root");
-
-    /** falcon super user. */
-    public static final String FALCON_SUPER_USER_NAME =
-            Config.getProperty("falcon.super.user.name", "falcon");
-
-    /** a user that belongs to falcon super user group but is not FALCON_SUPER_USER_NAME. */
-    public static final String FALCON_SUPER_USER2_NAME =
-            Config.getProperty("falcon.super.user2.name", "falcon2");
-    /** a user that has the same group as the current user. */
-    private static final String USER_2_NAME_STR = "user2_name";
-    private static final String USER_2_KEYTAB_STR = "user2_keytab";
-    public static final String USER2_NAME;
-    private static HashMap<String, String> keyTabMap;
-    private static HashMap<String, String> passwordMap;
-    public static final String USER_REALM = Config.getProperty("USER.REALM", "");
-    public static final String WASB_CONTAINER = Config.getProperty("wasb.container", "");
-    public static final String WASB_SECRET = Config.getProperty("wasb.secret", "");
-    public static final String WASB_ACCOUNT  = Config.getProperty("wasb.account", "");
-
-    public static final boolean CLEAN_TESTS_DIR =
-        Boolean.valueOf(Config.getProperty("clean_tests_dir", "true"));
-
-    public static final boolean IS_DEPRECATE =
-            Boolean.valueOf(Config.getProperty("is_deprecate", "false"));
-
-    /* initialize keyTabMap */
-    static {
-        final String currentUserKeytab = Config.getProperty(CURRENT_USER_KEYTAB_STR);
-        final String user2Name = Config.getProperty(USER_2_NAME_STR);
-        final String user2Keytab = Config.getProperty(USER_2_KEYTAB_STR);
-        LOGGER.info("CURRENT_USER_NAME: " + CURRENT_USER_NAME);
-        LOGGER.info("currentUserKeytab: " + currentUserKeytab);
-        LOGGER.info("user2Name: " + user2Name);
-        LOGGER.info("user2Keytab: " + user2Keytab);
-        USER2_NAME = user2Name;
-        keyTabMap = new HashMap<>();
-        keyTabMap.put(CURRENT_USER_NAME, currentUserKeytab);
-        keyTabMap.put(user2Name, user2Keytab);
-        keyTabMap.put(FALCON_SUPER_USER_NAME, Config.getProperty("falcon.super.user.keytab"));
-        keyTabMap.put(FALCON_SUPER_USER2_NAME, Config.getProperty("falcon.super.user2.keytab"));
-        keyTabMap.put(DIFFERENT_USER_NAME, Config.getProperty("other.user.keytab"));
-        passwordMap = new HashMap<>();
-        passwordMap.put(DIFFERENT_USER_NAME, Config.getProperty("other.user.password"));
-    }
-
-    public static String getKeytabForUser(String user) {
-        Assert.assertTrue(keyTabMap.containsKey(user), "Unknown user: " + user);
-        return keyTabMap.get(user);
-    }
-
-    public static String getPasswordForUser(String user) {
-        Assert.assertTrue(passwordMap.containsKey(user), "Unknown user: " + user);
-        return passwordMap.get(user);
-    }
-}
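
A minimal sketch of how tests resolved credentials through the removed MerlinConstants class; it assumes the corresponding keytab properties are present in the regression test configuration:

    String user = MerlinConstants.CURRENT_USER_NAME;
    String keytab = MerlinConstants.getKeytabForUser(user); // asserts the user was registered in the static block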

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/ResponseErrors.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/ResponseErrors.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/ResponseErrors.java
deleted file mode 100644
index b16edf6..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/ResponseErrors.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.enumsAndConstants;
-
-/** Class containing response errors of rest requests. */
-public enum ResponseErrors {
-
-    PROCESS_NOT_FOUND("(PROCESS) not found"),
-    UNPARSEABLE_DATE("Start and End dates cannot be empty for Instance POST apis"),
-    START_BEFORE_SCHEDULED("is before the entity was scheduled"),
-    PROCESS_INVALID_RANGE("is not in validity range of process"),
-    PROCESS_INSTANCE_FAULT("is not a valid instance time on cluster"),
-    FEED_INVALID_RANGE("is not in validity range for Feed"),
-    FEED_INSTANCE_FAULT("is not a valid instance for the  feed"),
-    INVALID_INSTANCE_TIME("not a valid instance");
-
-    private String error;
-
-    ResponseErrors(String error) {
-        this.error = error;
-    }
-
-    public String getError() {
-        return error;
-    }
-
-}
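
A minimal sketch of how the removed ResponseErrors enum was matched against a server message; the message text below is illustrative only:

    String serverMessage = "entity my-process (PROCESS) not found";   // illustrative server message
    Assert.assertTrue(serverMessage.contains(ResponseErrors.PROCESS_NOT_FOUND.getError()));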

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/RetentionUnit.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/RetentionUnit.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/RetentionUnit.java
deleted file mode 100644
index 92ea25e..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/enumsAndConstants/RetentionUnit.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.enumsAndConstants;
-
-import org.joda.time.DateTime;
-
-/** Enum to represent different Retention Units. */
-public enum RetentionUnit {
-    MINUTES("minutes") {
-        @Override
-        public DateTime minusTime(DateTime dateTime, int amount) {
-            return dateTime.minusMinutes(amount);
-        }
-    }, HOURS("hours") {
-        @Override
-        public DateTime minusTime(DateTime dateTime, int amount) {
-            return dateTime.minusHours(amount);
-        }
-    }, DAYS("days") {
-        @Override
-        public DateTime minusTime(DateTime dateTime, int amount) {
-            return dateTime.minusDays(amount);
-        }
-    }, MONTHS("months") {
-        @Override
-        public DateTime minusTime(DateTime dateTime, int amount) {
-            return dateTime.minusMonths(amount);
-        }
-    }, YEARS("years") {
-        @Override
-        public DateTime minusTime(DateTime dateTime, int amount) {
-            return dateTime.minusYears(amount);
-        }
-    };
-
-    private String value;
-
-    private RetentionUnit(String value) {
-        this.value = value;
-    }
-
-    public String getValue() {
-        return value;
-    }
-
-    public abstract DateTime minusTime(DateTime dateTime, int amount);
-
-}
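
A minimal sketch of computing a retention cut-off with the removed RetentionUnit enum; the 24-hour limit is illustrative:

    DateTime now = new DateTime();                                   // Joda-Time
    DateTime oldestKept = RetentionUnit.HOURS.minusTime(now, 24);    // instances older than this fall outside retention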

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/ColoHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/ColoHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/ColoHelper.java
deleted file mode 100644
index 4528cb9..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/ColoHelper.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.falcon.regression.core.helpers.entity.EntityHelperFactory;
-
-/** Helper class to work with a colo. */
-public class ColoHelper {
-    private final AbstractEntityHelper clusterHelper;
-    private final AbstractEntityHelper processHelper;
-    private final AbstractEntityHelper feedHelper;
-    private String prefix;
-
-    public ColoHelper(String prefix) {
-        this.prefix = prefix;
-        clusterHelper = EntityHelperFactory.getEntityHelper(EntityType.CLUSTER, prefix);
-        processHelper = EntityHelperFactory.getEntityHelper(EntityType.PROCESS, prefix);
-        feedHelper = EntityHelperFactory.getEntityHelper(EntityType.FEED, prefix);
-    }
-
-    public AbstractEntityHelper getClusterHelper() {
-        return clusterHelper;
-    }
-
-    public AbstractEntityHelper getFeedHelper() {
-        return feedHelper;
-    }
-
-    public AbstractEntityHelper getProcessHelper() {
-        return processHelper;
-    }
-
-    public String getPrefix() {
-        return prefix;
-    }
-}
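
A minimal sketch of how a ColoHelper was obtained and used; the "prism" prefix is the same one the removed Bundle class used for the prism host:

    ColoHelper prism = new ColoHelper("prism");                    // prefix resolved against the test properties
    AbstractEntityHelper feedHelper = prism.getFeedHelper();       // submits/schedules/deletes feed entities
    AbstractEntityHelper clusterHelper = prism.getClusterHelper(); // same operations for cluster entities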

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/FalconClientBuilder.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/FalconClientBuilder.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/FalconClientBuilder.java
deleted file mode 100644
index fcbb7f9..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/FalconClientBuilder.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers;
-
-import org.apache.commons.exec.CommandLine;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang3.builder.Builder;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.falcon.regression.core.util.OSUtil;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * FalconClientBuilder is used to build the falcon client command line, optionally running it as another user.
- */
-public final class FalconClientBuilder implements Builder<CommandLine> {
-    private final String user;
-    private final CommandLine commandLine;
-    private final List<String> args;
-    private final SuType suType;
-
-    private enum SuType {
-        /**
-         * Takes care of switching user on Linux. Currently implemented through sudo.
-         */
-        LIN_SUDO {
-            @Override
-            public CommandLine getCommandLine(String forUser) {
-                return CommandLine.parse("sudo").addArgument("-u")
-                    .addArgument(forUser).addArgument(FALCON_CLIENT_BINARY);
-            }
-            @Override
-            public void addArgsToCommandLine(CommandLine cmdLine, List<String> arguments) {
-                for (String arg : arguments) {
-                    cmdLine.addArgument(arg);
-                }
-            }
-        },
-        /**
-         * Takes care of switching user on Windows. Needs to be implemented.
-         */
-        WIN_SU {
-            @Override
-            public CommandLine getCommandLine(String forUser) {
-                return CommandLine.parse(OSUtil.WIN_SU_BINARY)
-                    .addArgument("-u").addArgument(forUser)
-                    .addArgument("-p").addArgument(MerlinConstants.getPasswordForUser(forUser))
-                    .addArgument(FALCON_CLIENT_BINARY);
-            }
-            @Override
-            public void addArgsToCommandLine(CommandLine cmdLine, List<String> arguments) {
-                String lastArg = StringUtils.join(arguments, " ");
-                cmdLine.addArgument(lastArg, true);
-            }
-        },
-        /**
-         * Takes care of the case where no user switch is required.
-         */
-        NONE {
-            @Override
-            public CommandLine getCommandLine(String forUser) {
-                return CommandLine.parse(FALCON_CLIENT_BINARY);
-            }
-            @Override
-            public void addArgsToCommandLine(CommandLine cmdLine, List<String> arguments) {
-                for (String arg : arguments) {
-                    cmdLine.addArgument(arg);
-                }
-            }
-        };
-
-        private static final String FALCON_CLIENT_BINARY =
-                Config.getProperty("falcon.client.binary", "falcon");
-        public abstract void addArgsToCommandLine(CommandLine cmdLine, List<String> arguments);
-        public abstract CommandLine getCommandLine(String forUser);
-    }
-
-    private FalconClientBuilder(String user) {
-        this.user = user;
-        args = new ArrayList<>();
-        if (user == null) {
-            suType = SuType.NONE;
-            commandLine = suType.getCommandLine(null);
-        } else {
-            if (OSUtil.IS_WINDOWS) {
-                suType = SuType.WIN_SU;
-                commandLine = suType.getCommandLine(user);
-            } else {
-                suType = SuType.LIN_SUDO;
-                // e.g. builds: sudo -u <user> falcon admin -version
-                commandLine = suType.getCommandLine(user);
-            }
-        }
-    }
-
-    /**
-     * Get an instance of FalconClientBuilder.
-     * @return instance of FalconClientBuilder
-     */
-    public static FalconClientBuilder getBuilder() {
-        return new FalconClientBuilder(null);
-    }
-
-    /**
-     * Get an instance of FalconClientBuilder for the given user. The command line is
-     * constructed so that the final command runs as that user.
-     * @param user user that the final command should run as
-     * @return instance of FalconClientBuilder
-     */
-    public static FalconClientBuilder getBuilder(String user) {
-        return new FalconClientBuilder(user);
-    }
-
-    /**
-     * Add the given argument.
-     * @param arg argument to be added to builder
-     * @return this
-     */
-    private FalconClientBuilder addArg(String arg) {
-        args.add(arg);
-        return this;
-    }
-
-    /**
-     * Create get metrics command.
-     * @param entityType type of the entity
-     * @param entityName name of the entity whose replication metrics are listed
-     * @return this
-     */
-    public FalconClientBuilder getMetricsCommand(String entityType, String entityName) {
-        addArg("metadata").addArg("-list");
-        addArg("-type").addArg("replication_metrics");
-        addArg("-" + entityType).addArg(entityName);
-        return this;
-    }
-
-    /**
-     * Create submit command.
-     * @param entityType type of the entity
-     * @param fileName file containing the entity to be submitted
-     * @return this
-     */
-    public FalconClientBuilder getSubmitCommand(String entityType, String fileName) {
-        addArg("entity").addArg("-submit");
-        addArg("-type").addArg(entityType);
-        addArg("-file").addArg(fileName);
-        return this;
-    }
-
-    /**
-     * Create delete command.
-     * @param entityType type of the entity
-     * @param entityName name of the entity to be deleted
-     * @return this
-     */
-    public FalconClientBuilder getDeleteCommand(String entityType, String entityName) {
-        addArg("entity").addArg("-delete");
-        addArg("-type").addArg(entityType);
-        addArg("-name").addArg(entityName);
-        return this;
-    }
-
-
-    /**
-     * Build the CommandLine object for this FalconClientBuilder.
-     * @return instance of CommandLine object
-     */
-    @Override
-    public CommandLine build() {
-        suType.addArgsToCommandLine(commandLine, args);
-        return new CommandLine(commandLine);
-    }
-}
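
A hedged sketch of how the removed builder was commonly used: build the argument list fluently, then hand the resulting CommandLine to commons-exec. The entity type and file path are illustrative, and running the command assumes a falcon client binary is reachable.

    import org.apache.commons.exec.CommandLine;
    import org.apache.commons.exec.DefaultExecutor;
    import org.apache.falcon.regression.core.helpers.FalconClientBuilder;

    public class FalconClientBuilderSketch {
        public static void main(String[] args) throws Exception {
            // Builds: falcon entity -submit -type feed -file /tmp/feed.xml
            CommandLine submit = FalconClientBuilder.getBuilder()
                    .getSubmitCommand("feed", "/tmp/feed.xml")
                    .build();
            // commons-exec runs the command; a non-zero exit code raises ExecuteException.
            int exitValue = new DefaultExecutor().execute(submit);
            System.out.println("falcon client exited with " + exitValue);
        }
    }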


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/Entity.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/Entity.java b/client/src/main/java/org/apache/falcon/entity/v0/Entity.java
deleted file mode 100644
index ba6f2e5..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/Entity.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-import java.io.StringReader;
-import java.io.StringWriter;
-
-/**
- * Base class that all entity JAXB objects extend.
- */
-public abstract class Entity {
-    public abstract String getName();
-
-    public abstract String getTags();
-
-    public abstract AccessControlList getACL();
-
-    public EntityType getEntityType() {
-        for (EntityType type : EntityType.values()) {
-            if (type.getEntityClass().equals(getClass())) {
-                return type;
-            }
-        }
-        return null;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || !o.getClass().equals(this.getClass())) {
-            return false;
-        }
-
-        Entity entity = (Entity) o;
-
-        String name = getName();
-        return name != null ? name.equals(entity.getName()) : entity.getName() == null;
-    }
-
-    @Override
-    public int hashCode() {
-        String clazz = this.getClass().getName();
-
-        String name = getName();
-        int result = name != null ? name.hashCode() : 0;
-        result = 31 * result + clazz.hashCode();
-        return result;
-    }
-
-    @Override
-    public String toString() {
-        try {
-            StringWriter stringWriter = new StringWriter();
-            Marshaller marshaller = getEntityType().getMarshaller();
-            marshaller.marshal(this, stringWriter);
-            return stringWriter.toString();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public static Entity fromString(EntityType type, String str) {
-        try {
-            Unmarshaller unmarshaler = type.getUnmarshaller();
-            return (Entity) unmarshaler.unmarshal(new StringReader(str));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public String toShortString() {
-        return "(" + getEntityType().name().toLowerCase() + ") " + getName();
-    }
-
-    public Entity copy() {
-        return fromString(getEntityType(), toString());
-    }
-}
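
To keep the removed contract easy to follow, a small sketch of the fromString/toString round trip. It assumes args[0] names a file holding a schema-valid feed definition.

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import org.apache.falcon.entity.v0.Entity;
    import org.apache.falcon.entity.v0.EntityType;

    public class EntityRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // args[0] is assumed to point at a schema-valid <feed> document.
            String feedXml = new String(Files.readAllBytes(Paths.get(args[0])), "UTF-8");
            Entity feed = Entity.fromString(EntityType.FEED, feedXml);
            Entity copy = feed.copy();                 // copy() is fromString(getEntityType(), toString())
            System.out.println(copy.toShortString());  // e.g. "(feed) <entity name>"
            System.out.println(feed.equals(copy));     // equality is by concrete class and name only
        }
    }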

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/EntityNotification.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/EntityNotification.java b/client/src/main/java/org/apache/falcon/entity/v0/EntityNotification.java
deleted file mode 100644
index bab70d4..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/EntityNotification.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-/**
- * EntityNotification class to be extended by the Feed/Process notification classes.
- */
-public abstract class EntityNotification {
-    public abstract String getType();
-    public abstract String getLevel();
-    public abstract String getTo();
-
-    public String toString() {
-        return "Notification{"
-                + "type=" + getType()
-                + ", to=" + getTo()
-                + "}";
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/EntityType.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/EntityType.java b/client/src/main/java/org/apache/falcon/entity/v0/EntityType.java
deleted file mode 100644
index 3d55547..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/EntityType.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.ValidationEvent;
-import javax.xml.bind.ValidationEventHandler;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.validation.Schema;
-import javax.xml.validation.SchemaFactory;
-import java.util.Arrays;
-
-/**
- * Enum for the types of entities in Falcon: Feed, Process, Cluster and Datasource.
- */
-public enum EntityType {
-    FEED(Feed.class, "/feed-0.1.xsd", "name"),
-    PROCESS(Process.class, "/process-0.1.xsd", "name"),
-    CLUSTER(Cluster.class, "/cluster-0.1.xsd", "name"),
-    DATASOURCE(Datasource.class, "/datasource-0.1.xsd", "name");
-
-    //Fail unmarshalling of whole xml if unmarshalling of any element fails
-    private static class EventHandler implements ValidationEventHandler {
-        @Override
-        public boolean handleEvent(ValidationEvent event) {
-            return false;
-        }
-    }
-
-    private static final String NS = "http://www.w3.org/2001/XMLSchema";
-
-    private final Class<? extends Entity> clazz;
-    private JAXBContext jaxbContext;
-    private Schema schema;
-    private String[] immutableProperties;
-
-    private String schemaFile;
-
-    private EntityType(Class<? extends Entity> typeClass, String schemaFile, String... immutableProperties) {
-        clazz = typeClass;
-        this.immutableProperties = immutableProperties;
-        this.schemaFile = schemaFile;
-        try {
-            jaxbContext = JAXBContext.newInstance(typeClass);
-            synchronized (this) {
-                SchemaFactory schemaFactory = SchemaFactory.newInstance(NS);
-                schema = schemaFactory.newSchema(getClass().getResource(schemaFile));
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public Class<? extends Entity> getEntityClass() {
-        return clazz;
-    }
-
-    public String getSchemaFile() {
-        return schemaFile;
-    }
-
-    public Marshaller getMarshaller() throws JAXBException {
-        Marshaller marshaller = jaxbContext.createMarshaller();
-        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
-        return marshaller;
-    }
-
-    public Unmarshaller getUnmarshaller() throws JAXBException {
-        Unmarshaller unmarshaller = jaxbContext.createUnmarshaller();
-        unmarshaller.setSchema(schema);
-        unmarshaller.setEventHandler(new EventHandler());
-        return unmarshaller;
-    }
-
-
-    public boolean isSchedulable() {
-        // Cluster and Datasource are not schedulable like Feed and Process
-        return ((this != EntityType.CLUSTER) && (this != EntityType.DATASOURCE));
-    }
-
-    @edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP"})
-    public String[] getImmutableProperties() {
-        return immutableProperties;
-    }
-
-    public static EntityType getEnum(String type) {
-        try {
-            return EntityType.valueOf(type.toUpperCase().trim());
-        } catch (IllegalArgumentException iae) {
-            throw new IllegalArgumentException("Invalid entity type: " + type + ". Expected "
-                    + Arrays.toString(values()).toLowerCase() + ".");
-        }
-    }
-}
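
A short sketch of the behaviour the removed enum provided: case-insensitive lookup plus a schema-validating, fail-fast unmarshaller per entity type.

    import javax.xml.bind.Unmarshaller;
    import org.apache.falcon.entity.v0.EntityType;

    public class EntityTypeSketch {
        public static void main(String[] args) throws Exception {
            // Lookup is case-insensitive; unknown values raise IllegalArgumentException.
            EntityType type = EntityType.getEnum("feed");
            System.out.println(type.getSchemaFile());   // /feed-0.1.xsd
            System.out.println(type.isSchedulable());   // true: only CLUSTER and DATASOURCE are not
            Unmarshaller unmarshaller = type.getUnmarshaller();
            System.out.println(unmarshaller.getSchema() != null);  // true: validation is always on
        }
    }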

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/Frequency.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/Frequency.java b/client/src/main/java/org/apache/falcon/entity/v0/Frequency.java
deleted file mode 100644
index f423df6..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/Frequency.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import java.util.Calendar;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Frequency as supported in the xsd definitions.
- */
-public class Frequency {
-    private static final Pattern PATTERN = Pattern.compile("(minutes|hours|days|months)\\((\\d+)\\)");
-
-    /**
-     * TimeUnit corresponding to the frequency.
-     */
-    public static enum TimeUnit {
-        minutes(Calendar.MINUTE), hours(Calendar.HOUR), days(Calendar.DATE), months(Calendar.MONTH);
-
-        private int calendarUnit;
-
-        private TimeUnit(int calendarUnit) {
-            this.calendarUnit = calendarUnit;
-        }
-
-        public int getCalendarUnit() {
-            return calendarUnit;
-        }
-    }
-
-    private TimeUnit timeUnit;
-    private String frequency;
-
-    public Frequency(String freq, TimeUnit timeUnit) {
-        this.frequency = freq;
-        this.timeUnit = timeUnit;
-    }
-
-    public Frequency(String strValue) {
-        Matcher matcher = PATTERN.matcher(strValue);
-        if (!matcher.matches()) {
-            throw new IllegalArgumentException("Invalid frequency: " + strValue);
-        }
-
-        timeUnit = TimeUnit.valueOf(matcher.group(1));
-        frequency = matcher.group(2);
-    }
-
-    public static Frequency fromString(String strValue) {
-        return new Frequency(strValue);
-    }
-
-    public static String toString(Frequency freq) {
-        return freq == null ? null : freq.toString();
-    }
-
-    @Override
-    public String toString() {
-        return timeUnit.name() + "(" + frequency + ")";
-    }
-
-    public TimeUnit getTimeUnit() {
-        return timeUnit;
-    }
-
-    public String getFrequency() {
-        return frequency;
-    }
-
-    public int getFrequencyAsInt() {
-        return Integer.parseInt(frequency);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (obj == null) {
-            return false;
-        }
-
-        if (!(obj instanceof Frequency)) {
-            return false;
-        }
-
-        Frequency freq = (Frequency) obj;
-        return this == freq || (this.getFrequency().equals(freq.getFrequency())
-                && this.getTimeUnit() == freq.getTimeUnit());
-
-    }
-
-    @Override
-    public int hashCode() {
-        int result = timeUnit.hashCode();
-        result = 31 * result + frequency.hashCode();
-        return result;
-    }
-}
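
The removed Frequency type accepted only the minutes|hours|days|months(<int>) form; a minimal parse/format round trip:

    import org.apache.falcon.entity.v0.Frequency;

    public class FrequencySketch {
        public static void main(String[] args) {
            // Any other shape, e.g. "weeks(1)", throws IllegalArgumentException.
            Frequency freq = Frequency.fromString("hours(6)");
            System.out.println(freq.getTimeUnit());        // hours
            System.out.println(freq.getFrequencyAsInt());  // 6
            System.out.println(freq);                      // hours(6) -- toString() reverses fromString()
        }
    }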

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/entity/v0/SchemaHelper.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/entity/v0/SchemaHelper.java b/client/src/main/java/org/apache/falcon/entity/v0/SchemaHelper.java
deleted file mode 100644
index 1c02f37..0000000
--- a/client/src/main/java/org/apache/falcon/entity/v0/SchemaHelper.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.TimeZone;
-
-/**
- * Support functions to parse and format dates in the xsd string form.
- */
-public final class SchemaHelper {
-
-    public static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm'Z'";
-
-    private SchemaHelper() {}
-
-    public static String getTimeZoneId(TimeZone tz) {
-        return tz.getID();
-    }
-
-    public static DateFormat getDateFormat() {
-        DateFormat dateFormat = new SimpleDateFormat(ISO8601_FORMAT);
-        dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
-        return dateFormat;
-    }
-
-    public static String formatDateUTC(Date date) {
-        return (date != null) ? getDateFormat().format(date) : null;
-    }
-
-    public static Date parseDateUTC(String dateStr) {
-        if (!DateValidator.validate(dateStr)) {
-            throw new IllegalArgumentException(dateStr + " is not a valid UTC string");
-        }
-        try {
-            return getDateFormat().parse(dateStr);
-        } catch (ParseException e) {
-            throw new RuntimeException("Unable to parse date: " + dateStr, e);
-        }
-    }
-
-    public static String formatDateUTCToISO8601(final String dateString, final String dateStringFormat) {
-
-        try {
-            DateFormat dateFormat = new SimpleDateFormat(dateStringFormat.substring(0, dateString.length()));
-            dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
-            return SchemaHelper.formatDateUTC(dateFormat.parse(dateString));
-        } catch (ParseException e) {
-            throw new RuntimeException(e);
-        }
-    }
-}
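
A small sketch of the UTC parse/format round trip the removed helper offered; the date literal is illustrative and assumed to satisfy the yyyy-MM-dd'T'HH:mm'Z' form that DateValidator checks.

    import java.util.Date;
    import org.apache.falcon.entity.v0.SchemaHelper;

    public class SchemaHelperSketch {
        public static void main(String[] args) {
            // Parsing rejects strings that DateValidator does not consider valid UTC.
            Date parsed = SchemaHelper.parseDateUTC("2016-03-01T09:25Z");
            System.out.println(SchemaHelper.formatDateUTC(parsed));  // 2016-03-01T09:25Z
        }
    }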

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/metadata/RelationshipType.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/metadata/RelationshipType.java b/client/src/main/java/org/apache/falcon/metadata/RelationshipType.java
deleted file mode 100644
index 6624319..0000000
--- a/client/src/main/java/org/apache/falcon/metadata/RelationshipType.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-/**
- * Enumerates Relationship types.
- */
-public enum RelationshipType {
-
-    // entity vertex types
-    CLUSTER_ENTITY("cluster-entity"),
-    FEED_ENTITY("feed-entity"),
-    PROCESS_ENTITY("process-entity"),
-    DATASOURCE_ENTITY("datasource-entity"),
-
-    // instance vertex types
-    FEED_INSTANCE("feed-instance"),
-    PROCESS_INSTANCE("process-instance"),
-    IMPORT_INSTANCE("import-instance"),
-
-    // Misc vertex types
-    USER("user"),
-    COLO("data-center"),
-    TAGS("classification"),
-    GROUPS("group"),
-    PIPELINES("pipelines"),
-    REPLICATION_METRICS("replication-metrics");
-
-
-    private final String name;
-
-    RelationshipType(java.lang.String name) {
-        this.name = name;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public static RelationshipType fromString(String value) {
-        if (value != null) {
-            for (RelationshipType type : RelationshipType.values()) {
-                if (value.equals(type.getName())) {
-                    return type;
-                }
-            }
-        }
-
-        throw new IllegalArgumentException("No constant with value " + value + " found");
-    }
-}
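
The removed enum resolved vertex types by their lower-case names rather than by the constant names; a tiny lookup sketch:

    import org.apache.falcon.metadata.RelationshipType;

    public class RelationshipTypeSketch {
        public static void main(String[] args) {
            // fromString matches getName(), so "feed-instance" works but "FEED_INSTANCE" does not.
            RelationshipType type = RelationshipType.fromString("feed-instance");
            System.out.println(type);            // FEED_INSTANCE
            System.out.println(type.getName());  // feed-instance
        }
    }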

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeTool.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeTool.java b/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeTool.java
deleted file mode 100644
index cf24078..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeTool.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Properties;
-import java.io.File;
-
-/**
- * Hdfs Replication recipe tool for Falcon recipes.
- */
-public class HdfsReplicationRecipeTool implements Recipe {
-
-    private static final String COMMA_SEPARATOR = ",";
-
-    @Override
-    public void validate(final Properties recipeProperties) {
-        for (HdfsReplicationRecipeToolOptions option : HdfsReplicationRecipeToolOptions.values()) {
-            if (recipeProperties.getProperty(option.getName()) == null && option.isRequired()) {
-                throw new IllegalArgumentException("Missing argument: " + option.getName());
-            }
-        }
-    }
-
-    @Override
-    public Properties getAdditionalSystemProperties(final Properties recipeProperties) {
-        Properties additionalProperties = new Properties();
-
-        // Construct fully qualified hdfs src path
-        String srcPaths = recipeProperties.getProperty(HdfsReplicationRecipeToolOptions
-                .REPLICATION_SOURCE_DIR.getName());
-        StringBuilder absoluteSrcPaths = new StringBuilder();
-        String srcFsPath = recipeProperties.getProperty(
-                HdfsReplicationRecipeToolOptions.REPLICATION_SOURCE_CLUSTER_FS_WRITE_ENDPOINT.getName());
-        if (StringUtils.isNotEmpty(srcFsPath)) {
-            srcFsPath = StringUtils.removeEnd(srcFsPath, File.separator);
-        }
-        if (StringUtils.isNotEmpty(srcPaths)) {
-            String[] paths = srcPaths.split(COMMA_SEPARATOR);
-
-            for (String path : paths) {
-                StringBuilder srcpath = new StringBuilder(srcFsPath);
-                srcpath.append(path.trim());
-                srcpath.append(COMMA_SEPARATOR);
-                absoluteSrcPaths.append(srcpath);
-            }
-        }
-
-        additionalProperties.put(HdfsReplicationRecipeToolOptions.REPLICATION_SOURCE_DIR.getName(),
-                StringUtils.removeEnd(absoluteSrcPaths.toString(), COMMA_SEPARATOR));
-        return additionalProperties;
-    }
-}
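
A hedged sketch of how the removed HDFS recipe tool qualified source paths; every property value below is illustrative.

    import java.util.Properties;
    import org.apache.falcon.recipe.HdfsReplicationRecipeTool;
    import org.apache.falcon.recipe.HdfsReplicationRecipeToolOptions;

    public class HdfsRecipeSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("drSourceDir", "/falcon/in,/falcon/archive");
            props.setProperty("drSourceClusterFS", "hdfs://source-nn:8020");
            props.setProperty("drTargetDir", "/falcon/in");
            props.setProperty("drTargetClusterFS", "hdfs://target-nn:8020");
            props.setProperty("distcpMaxMaps", "1");
            props.setProperty("distcpMapBandwidth", "100");

            HdfsReplicationRecipeTool tool = new HdfsReplicationRecipeTool();
            tool.validate(props);  // throws IllegalArgumentException if a required option is missing
            Properties extra = tool.getAdditionalSystemProperties(props);
            // Each source dir is prefixed with the source FS endpoint:
            // hdfs://source-nn:8020/falcon/in,hdfs://source-nn:8020/falcon/archive
            System.out.println(extra.getProperty(
                    HdfsReplicationRecipeToolOptions.REPLICATION_SOURCE_DIR.getName()));
        }
    }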

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeToolOptions.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeToolOptions.java b/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeToolOptions.java
deleted file mode 100644
index 4c3b543..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/HdfsReplicationRecipeToolOptions.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-/**
- * Hdfs Recipe tool options.
- */
-public enum HdfsReplicationRecipeToolOptions {
-    REPLICATION_SOURCE_DIR("drSourceDir", "Location of source data to replicate"),
-    REPLICATION_SOURCE_CLUSTER_FS_WRITE_ENDPOINT("drSourceClusterFS", "Source replication cluster end point"),
-    REPLICATION_TARGET_DIR("drTargetDir", "Location on target cluster for replication"),
-    REPLICATION_TARGET_CLUSTER_FS_WRITE_ENDPOINT("drTargetClusterFS", "Target replication cluster end point"),
-    REPLICATION_MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during replication"),
-    REPLICATION_MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication");
-
-    private final String name;
-    private final String description;
-    private final boolean isRequired;
-
-    HdfsReplicationRecipeToolOptions(String name, String description) {
-        this(name, description, true);
-    }
-
-    HdfsReplicationRecipeToolOptions(String name, String description, boolean isRequired) {
-        this.name = name;
-        this.description = description;
-        this.isRequired = isRequired;
-    }
-
-    public String getName() {
-        return this.name;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-
-    public boolean isRequired() {
-        return isRequired;
-    }
-
-    @Override
-    public String toString() {
-        return getName();
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeTool.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeTool.java b/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeTool.java
deleted file mode 100644
index 8b39673..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeTool.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.HCatDatabase;
-import org.apache.hive.hcatalog.api.HCatTable;
-import org.apache.hive.hcatalog.api.ObjectNotFoundException;
-import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
-import org.apache.hive.hcatalog.common.HCatException;
-
-import java.io.IOException;
-import java.util.Properties;
-
-/**
- * Hive Replication recipe tool for Falcon recipes.
- */
-public class HiveReplicationRecipeTool implements Recipe {
-    private static final String ALL_TABLES = "*";
-
-    @Override
-    public void validate(final Properties recipeProperties) throws Exception {
-        for (HiveReplicationRecipeToolOptions option : HiveReplicationRecipeToolOptions.values()) {
-            if (recipeProperties.getProperty(option.getName()) == null && option.isRequired()) {
-                throw new IllegalArgumentException("Missing argument: " + option.getName());
-            }
-        }
-
-        HCatClient sourceMetastoreClient = null;
-        HCatClient targetMetastoreClient = null;
-        try {
-            // Validate if DB exists - source and target
-            sourceMetastoreClient = getHiveMetaStoreClient(
-                    recipeProperties.getProperty(HiveReplicationRecipeToolOptions
-                            .REPLICATION_SOURCE_METASTORE_URI.getName()),
-                    recipeProperties.getProperty(HiveReplicationRecipeToolOptions
-                            .REPLICATION_SOURCE_HIVE_METASTORE_KERBEROS_PRINCIPAL.getName()),
-                    recipeProperties.getProperty(HiveReplicationRecipeToolOptions
-                            .REPLICATION_SOURCE_HIVE2_KERBEROS_PRINCIPAL.getName()));
-
-            String sourceDbList = recipeProperties.getProperty(
-                    HiveReplicationRecipeToolOptions.REPLICATION_SOURCE_DATABASE.getName());
-
-            if (StringUtils.isEmpty(sourceDbList)) {
-                throw new Exception("No source DB specified in property file");
-            }
-
-            String sourceTableList = recipeProperties.getProperty(
-                    HiveReplicationRecipeToolOptions.REPLICATION_SOURCE_TABLE.getName());
-            if (StringUtils.isEmpty(sourceTableList)) {
-                throw new Exception("No source table specified in property file. For DB replication please specify * "
-                        + "for sourceTable");
-            }
-
-            String[] srcDbs = sourceDbList.split(",");
-            if (srcDbs.length <= 0) {
-                throw new Exception("No source DB specified in property file");
-            }
-            for (String db : srcDbs) {
-                if (!dbExists(sourceMetastoreClient, db)) {
-                    throw new Exception("Database " + db + " doesn't exist on source cluster");
-                }
-            }
-
-            if (!sourceTableList.equals(ALL_TABLES)) {
-                String[] srcTables = sourceTableList.split(",");
-                if (srcTables.length > 0) {
-                    for (String table : srcTables) {
-                        if (!tableExists(sourceMetastoreClient, srcDbs[0], table)) {
-                            throw new Exception("Table " + table + " doesn't exist on source cluster");
-                        }
-                    }
-                }
-            }
-
-            targetMetastoreClient = getHiveMetaStoreClient(
-                    recipeProperties.getProperty(HiveReplicationRecipeToolOptions
-                            .REPLICATION_TARGET_METASTORE_URI.getName()),
-                    recipeProperties.getProperty(HiveReplicationRecipeToolOptions
-                            .REPLICATION_TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL.getName()),
-                    recipeProperties.getProperty(HiveReplicationRecipeToolOptions
-                            .REPLICATION_TARGET_HIVE2_KERBEROS_PRINCIPAL.getName()));
-            // Verify db exists on target
-            for (String db : srcDbs) {
-                if (!dbExists(targetMetastoreClient, db)) {
-                    throw new Exception("Database " + db + " doesn't exist on target cluster");
-                }
-            }
-        } finally {
-            if (sourceMetastoreClient != null) {
-                sourceMetastoreClient.close();
-            }
-            if (targetMetastoreClient != null) {
-                targetMetastoreClient.close();
-            }
-        }
-    }
-
-    @Override
-    public Properties getAdditionalSystemProperties(final Properties recipeProperties) {
-        Properties additionalProperties = new Properties();
-        String recipeName = recipeProperties.getProperty(RecipeToolOptions.RECIPE_NAME.getName());
-        // Add recipe name as Hive DR job
-        additionalProperties.put(HiveReplicationRecipeToolOptions.HIVE_DR_JOB_NAME.getName(), recipeName);
-        additionalProperties.put(HiveReplicationRecipeToolOptions.CLUSTER_FOR_JOB_RUN.getName(),
-                recipeProperties.getProperty(RecipeToolOptions.CLUSTER_NAME.getName()));
-        additionalProperties.put(HiveReplicationRecipeToolOptions.CLUSTER_FOR_JOB_RUN_WRITE_EP.getName(),
-                recipeProperties.getProperty(RecipeToolOptions.CLUSTER_HDFS_WRITE_ENDPOINT.getName()));
-        if (StringUtils.isNotEmpty(recipeProperties.getProperty(RecipeToolOptions.RECIPE_NN_PRINCIPAL.getName()))) {
-            additionalProperties.put(HiveReplicationRecipeToolOptions.CLUSTER_FOR_JOB_NN_KERBEROS_PRINCIPAL.getName(),
-                    recipeProperties.getProperty(RecipeToolOptions.RECIPE_NN_PRINCIPAL.getName()));
-        }
-        return additionalProperties;
-    }
-
-    private HCatClient getHiveMetaStoreClient(String metastoreUrl, String metastorePrincipal,
-                                              String hive2Principal) throws Exception {
-        try {
-            HiveConf hcatConf = createHiveConf(new Configuration(false), metastoreUrl,
-                    metastorePrincipal, hive2Principal);
-            return HCatClient.create(hcatConf);
-        } catch (IOException e) {
-            throw new Exception("Exception creating HCatClient: " + e.getMessage(), e);
-        }
-    }
-
-    private static HiveConf createHiveConf(Configuration conf, String metastoreUrl, String metastorePrincipal,
-                                           String hive2Principal) throws IOException {
-        HiveConf hcatConf = new HiveConf(conf, HiveConf.class);
-
-        hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUrl);
-        hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-        hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
-        hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-
-        hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-        hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-        if (StringUtils.isNotEmpty(metastorePrincipal)) {
-            hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, metastorePrincipal);
-            hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
-            hcatConf.set(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
-        }
-        if (StringUtils.isNotEmpty(hive2Principal)) {
-            hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname, hive2Principal);
-            hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, "kerberos");
-        }
-
-        return hcatConf;
-    }
-
-    private static boolean tableExists(HCatClient client, final String database, final String tableName)
-        throws Exception {
-        try {
-            HCatTable table = client.getTable(database, tableName);
-            return table != null;
-        } catch (ObjectNotFoundException e) {
-            System.out.println(e.getMessage());
-            return false;
-        } catch (HCatException e) {
-            throw new Exception("Exception checking if the table exists:" + e.getMessage(), e);
-        }
-    }
-
-    private static boolean dbExists(HCatClient client, final String database)
-        throws Exception {
-        try {
-            HCatDatabase db = client.getDatabase(database);
-            return db != null;
-        } catch (ObjectNotFoundException e) {
-            System.out.println(e.getMessage());
-            return false;
-        } catch (HCatException e) {
-            throw new Exception("Exception checking if the db exists:" + e.getMessage(), e);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeToolOptions.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeToolOptions.java b/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeToolOptions.java
deleted file mode 100644
index ec0465d..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/HiveReplicationRecipeToolOptions.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-/**
- * Hive Recipe tool options.
- */
-public enum HiveReplicationRecipeToolOptions {
-    REPLICATION_SOURCE_CLUSTER("sourceCluster", "Replication source cluster name"),
-    REPLICATION_SOURCE_METASTORE_URI("sourceMetastoreUri", "Source Hive metastore uri"),
-    REPLICATION_SOURCE_HS2_URI("sourceHiveServer2Uri", "Source HS2 uri"),
-    REPLICATION_SOURCE_DATABASE("sourceDatabase", "List of databases to replicate"),
-    REPLICATION_SOURCE_TABLE("sourceTable", "List of tables to replicate"),
-    REPLICATION_SOURCE_STAGING_PATH("sourceStagingPath", "Location of source staging path"),
-    REPLICATION_SOURCE_NN("sourceNN", "Source name node"),
-    REPLICATION_SOURCE_NN_KERBEROS_PRINCIPAL("sourceNNKerberosPrincipal", "Source name node kerberos principal", false),
-    REPLICATION_SOURCE_HIVE_METASTORE_KERBEROS_PRINCIPAL("sourceHiveMetastoreKerberosPrincipal",
-            "Source hive metastore kerberos principal", false),
-    REPLICATION_SOURCE_HIVE2_KERBEROS_PRINCIPAL("sourceHive2KerberosPrincipal",
-            "Source hiveserver2 kerberos principal", false),
-
-    REPLICATION_TARGET_CLUSTER("targetCluster", "Replication target cluster name"),
-    REPLICATION_TARGET_METASTORE_URI("targetMetastoreUri", "Target Hive metastore uri"),
-    REPLICATION_TARGET_HS2_URI("targetHiveServer2Uri", "Target HS2 uri"),
-    REPLICATION_TARGET_STAGING_PATH("targetStagingPath", "Location of target staging path"),
-    REPLICATION_TARGET_NN("targetNN", "Target name node"),
-    REPLICATION_TARGET_NN_KERBEROS_PRINCIPAL("targetNNKerberosPrincipal", "Target name node kerberos principal", false),
-    REPLICATION_TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL("targetHiveMetastoreKerberosPrincipal",
-            "Target hive metastore kerberos principal", false),
-    REPLICATION_TARGET_HIVE2_KERBEROS_PRINCIPAL("targetHive2KerberosPrincipal",
-            "Target hiveserver2 kerberos principal", false),
-
-    REPLICATION_MAX_EVENTS("maxEvents", "Maximum events to replicate"),
-    REPLICATION_MAX_MAPS("replicationMaxMaps", "Maximum number of maps used during replication"),
-    DISTCP_MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during distcp"),
-    REPLICATION_MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication"),
-    CLUSTER_FOR_JOB_RUN("clusterForJobRun", "Cluster on which replication job runs", false),
-    CLUSTER_FOR_JOB_NN_KERBEROS_PRINCIPAL("clusterForJobNNKerberosPrincipal",
-            "Write EP of cluster on which replication job runs", false),
-    CLUSTER_FOR_JOB_RUN_WRITE_EP("clusterForJobRunWriteEP", "Write EP of cluster on which replication job runs", false),
-    HIVE_DR_JOB_NAME("drJobName", "Unique hive DR job name", false);
-
-    private final String name;
-    private final String description;
-    private final boolean isRequired;
-
-    HiveReplicationRecipeToolOptions(String name, String description) {
-        this(name, description, true);
-    }
-
-    HiveReplicationRecipeToolOptions(String name, String description, boolean isRequired) {
-        this.name = name;
-        this.description = description;
-        this.isRequired = isRequired;
-    }
-
-    public String getName() {
-        return this.name;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-
-    public boolean isRequired() {
-        return isRequired;
-    }
-
-    @Override
-    public String toString() {
-        return getName();
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/Recipe.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/Recipe.java b/client/src/main/java/org/apache/falcon/recipe/Recipe.java
deleted file mode 100644
index 609131d..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/Recipe.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import java.util.Properties;
-
-/**
- * Recipe interface.
- */
-public interface Recipe {
-    void validate(final Properties recipeProperties) throws Exception;
-    Properties getAdditionalSystemProperties(final Properties recipeProperties);
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/RecipeFactory.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/RecipeFactory.java b/client/src/main/java/org/apache/falcon/recipe/RecipeFactory.java
deleted file mode 100644
index 32b0871..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/RecipeFactory.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import org.apache.falcon.cli.FalconCLI.RecipeOperation;
-
-/**
- * Recipe factory.
- */
-public final class RecipeFactory {
-
-    private RecipeFactory() {
-    }
-
-    public static Recipe getRecipeToolType(String recipeType) {
-        if (recipeType == null) {
-            return null;
-        }
-
-        if (RecipeOperation.HDFS_REPLICATION.toString().equalsIgnoreCase(recipeType)) {
-            return new HdfsReplicationRecipeTool();
-        } else if (RecipeOperation.HIVE_DISASTER_RECOVERY.toString().equalsIgnoreCase(recipeType)) {
-            return new HiveReplicationRecipeTool();
-        }
-        return null;
-    }
-
-}
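
Finally, a small sketch of how the removed factory was asked for a recipe tool; passing the enum's own string form guarantees the case-insensitive match, and unknown types simply return null.

    import org.apache.falcon.cli.FalconCLI;
    import org.apache.falcon.recipe.Recipe;
    import org.apache.falcon.recipe.RecipeFactory;

    public class RecipeFactorySketch {
        public static void main(String[] args) {
            Recipe recipe = RecipeFactory.getRecipeToolType(
                    FalconCLI.RecipeOperation.HDFS_REPLICATION.toString());
            System.out.println(recipe.getClass().getSimpleName());  // HdfsReplicationRecipeTool
        }
    }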

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/RecipeTool.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/RecipeTool.java b/client/src/main/java/org/apache/falcon/recipe/RecipeTool.java
deleted file mode 100644
index 243ff4d..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/RecipeTool.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.recipe.util.RecipeProcessBuilderUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * Base recipe tool for Falcon recipes.
- */
-public class RecipeTool extends Configured implements Tool {
-    private static final String HDFS_WF_PATH = "falcon" + File.separator + "recipes" + File.separator;
-    private static final FsPermission FS_PERMISSION =
-            new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
-    private static final String FS_DEFAULT_NAME_KEY = "fs.defaultFS";
-    private static final String NN_PRINCIPAL = "dfs.namenode.kerberos.principal";
-
-    public static void main(String[] args) throws Exception {
-        ToolRunner.run(new Configuration(), new RecipeTool(), args);
-    }
-
-    @Override
-    public int run(String[] arguments) throws Exception {
-
-        Map<RecipeToolArgs, String> argMap = setupArgs(arguments);
-        if (argMap == null || argMap.isEmpty()) {
-            throw new Exception("Arguments passed to recipe is null");
-        }
-        Configuration conf = getConf();
-        String recipePropertiesFilePath = argMap.get(RecipeToolArgs.RECIPE_PROPERTIES_FILE_ARG);
-        Properties recipeProperties = loadProperties(recipePropertiesFilePath);
-        validateProperties(recipeProperties);
-
-        String recipeOperation = argMap.get(RecipeToolArgs.RECIPE_OPERATION_ARG);
-        Recipe recipeType = RecipeFactory.getRecipeToolType(recipeOperation);
-        if (recipeType != null) {
-            recipeType.validate(recipeProperties);
-            Properties props = recipeType.getAdditionalSystemProperties(recipeProperties);
-            if (props != null && !props.isEmpty()) {
-                recipeProperties.putAll(props);
-            }
-        }
-
-        String processFilename;
-
-        FileSystem fs = getFileSystemForHdfs(recipeProperties, conf);
-        validateArtifacts(recipeProperties, fs);
-
-        String recipeName = recipeProperties.getProperty(RecipeToolOptions.RECIPE_NAME.getName());
-        copyFilesToHdfsIfRequired(recipeProperties, fs, recipeName);
-
-        processFilename = RecipeProcessBuilderUtils.createProcessFromTemplate(argMap.get(RecipeToolArgs
-                .RECIPE_FILE_ARG), recipeProperties, argMap.get(RecipeToolArgs.RECIPE_PROCESS_XML_FILE_PATH_ARG));
-
-
-        System.out.println("Generated process file to be scheduled: ");
-        System.out.println(FileUtils.readFileToString(new File(processFilename)));
-
-        System.out.println("Completed recipe processing");
-        return 0;
-    }
-
-    private Map<RecipeToolArgs, String> setupArgs(final String[] arguments) throws ParseException {
-        Options options = new Options();
-        Map<RecipeToolArgs, String> argMap = new HashMap<RecipeToolArgs, String>();
-
-        for (RecipeToolArgs arg : RecipeToolArgs.values()) {
-            addOption(options, arg, arg.isRequired());
-        }
-
-        CommandLine cmd = new GnuParser().parse(options, arguments);
-        for (RecipeToolArgs arg : RecipeToolArgs.values()) {
-            String optionValue = arg.getOptionValue(cmd);
-            if (StringUtils.isNotEmpty(optionValue)) {
-                argMap.put(arg, optionValue);
-            }
-        }
-        return argMap;
-    }
-
-    private static void addOption(final Options options, final RecipeToolArgs arg,
-                                  final boolean isRequired) {
-        Option option = arg.getOption();
-        option.setRequired(isRequired);
-        options.addOption(option);
-    }
-
-    private static void validateProperties(final Properties recipeProperties) {
-        for (RecipeToolOptions option : RecipeToolOptions.values()) {
-            if (recipeProperties.getProperty(option.getName()) == null && option.isRequired()) {
-                throw new IllegalArgumentException("Missing argument: " + option.getName());
-            }
-        }
-    }
-
-    private static Properties loadProperties(final String propertiesFilePath) throws Exception {
-        InputStream inputStream = null;
-        try {
-            inputStream = new FileInputStream(propertiesFilePath);
-            Properties prop = new Properties();
-            prop.load(inputStream);
-            return prop;
-        } finally {
-            IOUtils.closeQuietly(inputStream);
-        }
-    }
-
-    private static void validateArtifacts(final Properties recipeProperties, final FileSystem fs) throws Exception {
-        // validate the WF path
-        String wfPath = recipeProperties.getProperty(RecipeToolOptions.WORKFLOW_PATH.getName());
-
-        // Check if file exists on HDFS
-        if (StringUtils.isNotEmpty(wfPath) && !fs.exists(new Path(wfPath))) {
-            // If the file doesn't exist locally throw exception
-            if (!doesFileExist(wfPath)) {
-                throw new Exception("Recipe workflow file does not exist : " + wfPath + " on local FS or HDFS");
-            }
-        }
-
-        // validate lib path
-        String libPath = recipeProperties.getProperty(RecipeToolOptions.WORKFLOW_LIB_PATH.getName());
-        if (StringUtils.isNotEmpty(libPath) && !fs.exists(new Path(libPath))) {
-            if (!doesFileExist(libPath)) {
-                throw new Exception("Recipe lib file path does not exist : " + libPath + " on local FS or HDFS");
-            }
-        }
-    }
-
-    private static void copyFilesToHdfsIfRequired(final Properties recipeProperties,
-                                                  final FileSystem fs,
-                                                  final String recipeName) throws Exception {
-
-        String hdfsPath = HDFS_WF_PATH + recipeName + File.separator;
-
-        String recipeWfPathName = RecipeToolOptions.WORKFLOW_PATH.getName();
-        String wfPath = recipeProperties.getProperty(recipeWfPathName);
-        String wfPathValue;
-
-        // Copy only if files are on local FS
-        if (StringUtils.isNotEmpty(wfPath) && !fs.exists(new Path(wfPath))) {
-            createDirOnHdfs(hdfsPath, fs);
-            if (new File(wfPath).isDirectory()) {
-                wfPathValue = hdfsPath + getLastPartOfPath(wfPath);
-                copyFileFromLocalToHdfs(wfPath, hdfsPath, true, wfPathValue, fs);
-            } else {
-                wfPathValue = hdfsPath + new File(wfPath).getName();
-                copyFileFromLocalToHdfs(wfPath, hdfsPath, false, null, fs);
-            }
-            // Update the property with the hdfs path
-            recipeProperties.setProperty(recipeWfPathName,
-                    fs.getFileStatus(new Path(wfPathValue)).getPath().toString());
-            System.out.println("Copied WF to: " + recipeProperties.getProperty(recipeWfPathName));
-        }
-
-        String recipeWfLibPathName = RecipeToolOptions.WORKFLOW_LIB_PATH.getName();
-        String libPath = recipeProperties.getProperty(recipeWfLibPathName);
-        String libPathValue;
-        // Copy only if files are on local FS
-        boolean isLibPathEmpty = StringUtils.isEmpty(libPath);
-        if (!isLibPathEmpty && !fs.exists(new Path(libPath))) {
-            if (new File(libPath).isDirectory()) {
-                libPathValue = hdfsPath + getLastPartOfPath(libPath);
-                copyFileFromLocalToHdfs(libPath, hdfsPath, true, libPathValue, fs);
-            } else {
-                libPathValue = hdfsPath + "lib" + File.separator + new File(libPath).getName();
-                copyFileFromLocalToHdfs(libPath, libPathValue, false, null, fs);
-            }
-
-            // Update the property with the hdfs path
-            recipeProperties.setProperty(recipeWfLibPathName,
-                    fs.getFileStatus(new Path(libPathValue)).getPath().toString());
-            System.out.println("Copied WF libs to: " + recipeProperties.getProperty(recipeWfLibPathName));
-        } else if (isLibPathEmpty) {
-            // Replace ##workflow.lib.path## with "" to ignore lib in workflow template
-            recipeProperties.setProperty(recipeWfLibPathName, "");
-        }
-    }
-
-    private static String getLastPartOfPath(final String path) {
-        String normalizedWfPath = FilenameUtils.normalizeNoEndSeparator(path);
-        return (normalizedWfPath == null) ? FilenameUtils.getName(path)
-                : FilenameUtils.getName(normalizedWfPath);
-    }
-
-    private static void createDirOnHdfs(String path, FileSystem fs) throws IOException {
-        Path hdfsPath = new Path(path);
-        if (!fs.exists(hdfsPath)) {
-            FileSystem.mkdirs(fs, hdfsPath, FS_PERMISSION);
-        }
-    }
-
-    private static boolean doesFileExist(final String filename) {
-        return new File(filename).exists();
-    }
-
-    private static void copyFileFromLocalToHdfs(final String localFilePath,
-                                                final String hdfsFilePath,
-                                                final boolean copyDir,
-                                                final String hdfsFileDirPath,
-                                                final FileSystem fs) throws IOException {
-        /* If the directory already exists and has contents, copyFromLocalFile with overwrite set to true will fail
-         * with "Target is a directory", so delete the directory first. */
-        if (copyDir) {
-            Path hdfsPath = new Path(hdfsFileDirPath);
-            fs.delete(hdfsPath, true);
-        }
-
-        /* For cases where validation of process entity file fails, the artifacts would have been already copied to
-         * HDFS. Set overwrite to true so that next submit recipe copies updated artifacts from local FS to HDFS */
-        fs.copyFromLocalFile(false, true, new Path(localFilePath), new Path(hdfsFilePath));
-    }
-
-    private FileSystem getFileSystemForHdfs(final Properties recipeProperties,
-                                            final Configuration conf) throws Exception {
-        String storageEndpoint = RecipeToolOptions.CLUSTER_HDFS_WRITE_ENDPOINT.getName();
-        String nameNode = recipeProperties.getProperty(storageEndpoint);
-        conf.set(FS_DEFAULT_NAME_KEY, nameNode);
-        if (UserGroupInformation.isSecurityEnabled()) {
-            String nameNodePrincipal = recipeProperties.getProperty(RecipeToolOptions.RECIPE_NN_PRINCIPAL.getName());
-            conf.set(NN_PRINCIPAL, nameNodePrincipal);
-        }
-        return createFileSystem(UserGroupInformation.getLoginUser(), new URI(nameNode), conf);
-    }
-
-    private FileSystem createFileSystem(UserGroupInformation ugi, final URI uri,
-                                       final Configuration conf) throws Exception {
-        try {
-            final String proxyUserName = ugi.getShortUserName();
-            if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
-                return FileSystem.get(uri, conf);
-            }
-
-            return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
-                public FileSystem run() throws Exception {
-                    return FileSystem.get(uri, conf);
-                }
-            });
-        } catch (InterruptedException ex) {
-            throw new IOException("Exception creating FileSystem:" + ex.getMessage(), ex);
-        }
-    }
-}
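
For readers following the removed code, a minimal, illustrative Java sketch of the proxy-user FileSystem pattern used in createFileSystem above; the class name and endpoint value are placeholders, and this sketch is not part of the Falcon source.

import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public final class ProxyFsSketch {

    /** Opens an HDFS FileSystem as the given user, falling back to a direct call for the login user. */
    public static FileSystem openAs(UserGroupInformation ugi, String nameNode) throws Exception {
        final Configuration conf = new Configuration();
        conf.set("fs.defaultFS", nameNode);           // assumed endpoint, e.g. hdfs://nn:8020
        final URI uri = new URI(nameNode);
        if (ugi.getShortUserName().equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            return FileSystem.get(uri, conf);         // same user, no impersonation needed
        }
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                return FileSystem.get(uri, conf);     // runs with the proxied user's credentials
            }
        });
    }

    private ProxyFsSketch() {
    }
}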

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/RecipeToolArgs.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/RecipeToolArgs.java b/client/src/main/java/org/apache/falcon/recipe/RecipeToolArgs.java
deleted file mode 100644
index 79d8f18..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/RecipeToolArgs.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-
-/**
- * Recipe tool args.
- */
-public enum RecipeToolArgs {
-    RECIPE_FILE_ARG("file", "recipe template file path"),
-    RECIPE_PROPERTIES_FILE_ARG("propertiesFile", "recipe properties file path"),
-    RECIPE_PROCESS_XML_FILE_PATH_ARG(
-            "recipeProcessFilePath", "file path of recipe process to be submitted"),
-    RECIPE_OPERATION_ARG("recipeOperation", "recipe operation");
-
-    private final String name;
-    private final String description;
-    private final boolean isRequired;
-    RecipeToolArgs(String name, String description) {
-        this(name, description, true);
-    }
-
-    RecipeToolArgs(String name, String description, boolean isRequired) {
-        this.name = name;
-        this.description = description;
-        this.isRequired = isRequired;
-    }
-
-    public Option getOption() {
-        return new Option(this.name, true, this.description);
-    }
-
-    public String getName() {
-        return this.name;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-
-    public boolean isRequired() {
-        return isRequired;
-    }
-
-    public String getOptionValue(CommandLine cmd) {
-        return cmd.getOptionValue(this.name);
-    }
-
-    @Override
-    public String toString() {
-        return getName();
-    }
-}
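
A minimal, self-contained sketch of how such an enum can drive commons-cli parsing, mirroring setupArgs in the removed RecipeTool; the ToolArg names below are illustrative placeholders, not the real RecipeToolArgs values.

import java.util.EnumMap;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public final class ArgsSketch {

    /** Illustrative stand-in for the RecipeToolArgs enum. */
    enum ToolArg {
        FILE("file"), PROPERTIES_FILE("propertiesFile");

        private final String name;
        ToolArg(String name) { this.name = name; }
        Option option() { return new Option(name, true, "value for -" + name); }
        String valueFrom(CommandLine cmd) { return cmd.getOptionValue(name); }
    }

    public static Map<ToolArg, String> parse(String[] args) throws ParseException {
        Options options = new Options();
        for (ToolArg arg : ToolArg.values()) {
            Option option = arg.option();
            option.setRequired(true);                 // mirrors arg.isRequired() in the real enum
            options.addOption(option);
        }
        CommandLine cmd = new GnuParser().parse(options, args);
        Map<ToolArg, String> values = new EnumMap<>(ToolArg.class);
        for (ToolArg arg : ToolArg.values()) {
            values.put(arg, arg.valueFrom(cmd));
        }
        return values;
    }

    private ArgsSketch() {
    }
}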

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/RecipeToolOptions.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/RecipeToolOptions.java b/client/src/main/java/org/apache/falcon/recipe/RecipeToolOptions.java
deleted file mode 100644
index 2a7a7a0..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/RecipeToolOptions.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe;
-
-import java.util.Map;
-import java.util.HashMap;
-
-/**
- * Recipe tool options.
- */
-public enum RecipeToolOptions {
-    RECIPE_NAME("falcon.recipe.name", "Recipe name", false),
-    CLUSTER_NAME("falcon.recipe.cluster.name", "Cluster name where replication job should run", false),
-    CLUSTER_HDFS_WRITE_ENDPOINT(
-            "falcon.recipe.cluster.hdfs.writeEndPoint", "Cluster HDFS write endpoint"),
-    CLUSTER_VALIDITY_START("falcon.recipe.cluster.validity.start", "Source cluster validity start", false),
-    CLUSTER_VALIDITY_END("falcon.recipe.cluster.validity.end", "Source cluster validity end", false),
-    WORKFLOW_NAME("falcon.recipe.workflow.name", "Workflow name", false),
-    WORKFLOW_PATH("falcon.recipe.workflow.path", "Workflow path", false),
-    WORKFLOW_LIB_PATH("falcon.recipe.workflow.lib.path", "WF lib path", false),
-    PROCESS_FREQUENCY("falcon.recipe.process.frequency", "Process frequency", false),
-    RETRY_POLICY("falcon.recipe.retry.policy", "Retry policy", false),
-    RETRY_DELAY("falcon.recipe.retry.delay", "Retry delay", false),
-    RETRY_ATTEMPTS("falcon.recipe.retry.attempts", "Retry attempts", false),
-    RETRY_ON_TIMEOUT("falcon.recipe.retry.onTimeout", "Retry onTimeout", false),
-    RECIPE_TAGS("falcon.recipe.tags", "Recipe tags", false),
-    RECIPE_ACL_OWNER("falcon.recipe.acl.owner", "Recipe acl owner", false),
-    RECIPE_ACL_GROUP("falcon.recipe.acl.group", "Recipe acl group", false),
-    RECIPE_ACL_PERMISSION("falcon.recipe.acl.permission", "Recipe acl permission", false),
-    RECIPE_NN_PRINCIPAL("falcon.recipe.nn.principal", "Recipe DFS NN principal", false),
-    RECIPE_NOTIFICATION_TYPE("falcon.recipe.notification.type", "Recipe Notification Type", false),
-    RECIPE_NOTIFICATION_ADDRESS("falcon.recipe.notification.receivers", "Recipe Email Notification receivers", false);
-
-    private final String name;
-    private final String description;
-    private final boolean isRequired;
-
-    private static Map<String, RecipeToolOptions> optionsMap = new HashMap<>();
-    static {
-        for (RecipeToolOptions c : RecipeToolOptions.values()) {
-            optionsMap.put(c.getName(), c);
-        }
-    }
-
-    public static Map<String, RecipeToolOptions> getOptionsMap() {
-        return optionsMap;
-    }
-
-    RecipeToolOptions(String name, String description) {
-        this(name, description, true);
-    }
-
-    RecipeToolOptions(String name, String description, boolean isRequired) {
-        this.name = name;
-        this.description = description;
-        this.isRequired = isRequired;
-    }
-
-    public String getName() {
-        return this.name;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-
-    public boolean isRequired() {
-        return isRequired;
-    }
-
-    @Override
-    public String toString() {
-        return getName();
-    }
-}
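
A small sketch of loading a recipe properties file and enforcing required keys, mirroring validateProperties in the removed RecipeTool; per the enum above, only falcon.recipe.cluster.hdfs.writeEndPoint is required and all other keys are optional. Class and method names are illustrative.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public final class RecipePropsCheck {
    // Per the RecipeToolOptions enum, only the HDFS write endpoint key is mandatory.
    private static final List<String> REQUIRED =
            Arrays.asList("falcon.recipe.cluster.hdfs.writeEndPoint");

    public static Properties loadAndValidate(String path) throws IOException {
        Properties props = new Properties();
        try (InputStream in = new FileInputStream(path)) {
            props.load(in);
        }
        for (String key : REQUIRED) {
            if (props.getProperty(key) == null) {
                throw new IllegalArgumentException("Missing argument: " + key);
            }
        }
        return props;
    }

    private RecipePropsCheck() {
    }
}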

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/recipe/util/RecipeProcessBuilderUtils.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/recipe/util/RecipeProcessBuilderUtils.java b/client/src/main/java/org/apache/falcon/recipe/util/RecipeProcessBuilderUtils.java
deleted file mode 100644
index 550ca1b..0000000
--- a/client/src/main/java/org/apache/falcon/recipe/util/RecipeProcessBuilderUtils.java
+++ /dev/null
@@ -1,293 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.recipe.util;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Notification;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Property;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.entity.v0.process.Workflow;
-import org.apache.falcon.recipe.RecipeToolOptions;
-
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.bind.ValidationEvent;
-import javax.xml.bind.ValidationEventHandler;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.OutputStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Recipe builder utility.
- */
-public final class RecipeProcessBuilderUtils {
-
-    private static final Pattern RECIPE_VAR_PATTERN = Pattern.compile("##[A-Za-z0-9_.]*##");
-
-    private RecipeProcessBuilderUtils() {
-    }
-
-    public static String createProcessFromTemplate(final String processTemplateFile, final Properties recipeProperties,
-                                                   final String processFilename) throws Exception {
-        org.apache.falcon.entity.v0.process.Process process = bindAttributesInTemplate(
-                processTemplateFile, recipeProperties);
-        String recipeProcessFilename = createProcessXmlFile(processFilename, process);
-
-        validateProcessXmlFile(recipeProcessFilename);
-        return recipeProcessFilename;
-    }
-
-    private static org.apache.falcon.entity.v0.process.Process
-    bindAttributesInTemplate(final String templateFile, final Properties recipeProperties)
-        throws Exception {
-        if (templateFile == null || recipeProperties == null) {
-            throw new IllegalArgumentException("Invalid arguments passed");
-        }
-
-        Unmarshaller unmarshaller = EntityType.PROCESS.getUnmarshaller();
-        // Validation can be skipped for unmarshalling as we only want to bind the template with the properties.
-        // Validation is handled as part of marshalling.
-        unmarshaller.setSchema(null);
-        unmarshaller.setEventHandler(new ValidationEventHandler() {
-                public boolean handleEvent(ValidationEvent validationEvent) {
-                    return true;
-                }
-            }
-        );
-
-        URL processResourceUrl = new File(templateFile).toURI().toURL();
-        org.apache.falcon.entity.v0.process.Process process =
-                (org.apache.falcon.entity.v0.process.Process) unmarshaller.unmarshal(processResourceUrl);
-
-        /* For optional properties the user might set them directly in the process xml instead of the properties
-           file. Before submission, validation confirms that the process xml contains no unresolved
-           RECIPE_VAR_PATTERN placeholders. */
-
-        String processName = recipeProperties.getProperty(RecipeToolOptions.RECIPE_NAME.getName());
-        if (StringUtils.isNotEmpty(processName)) {
-            process.setName(processName);
-        }
-
-        // DR process template has only one cluster
-        bindClusterProperties(process.getClusters().getClusters().get(0), recipeProperties);
-
-        // bind scheduling properties
-        String processFrequency = recipeProperties.getProperty(RecipeToolOptions.PROCESS_FREQUENCY.getName());
-        if (StringUtils.isNotEmpty(processFrequency)) {
-            process.setFrequency(Frequency.fromString(processFrequency));
-        }
-
-        bindWorkflowProperties(process.getWorkflow(), recipeProperties);
-        bindRetryProperties(process.getRetry(), recipeProperties);
-        bindNotificationProperties(process.getNotification(), recipeProperties);
-        bindACLProperties(process.getACL(), recipeProperties);
-        bindTagsProperties(process, recipeProperties);
-        bindCustomProperties(process.getProperties(), recipeProperties);
-
-        return process;
-    }
-
-    private static void bindClusterProperties(final Cluster cluster,
-                                              final Properties recipeProperties) {
-        // DR process template has only one cluster
-        String clusterName = recipeProperties.getProperty(RecipeToolOptions.CLUSTER_NAME.getName());
-        if (StringUtils.isNotEmpty(clusterName)) {
-            cluster.setName(clusterName);
-        }
-
-        String clusterStartValidity = recipeProperties.getProperty(RecipeToolOptions.CLUSTER_VALIDITY_START.getName());
-        if (StringUtils.isNotEmpty(clusterStartValidity)) {
-            cluster.getValidity().setStart(SchemaHelper.parseDateUTC(clusterStartValidity));
-        }
-
-        String clusterEndValidity = recipeProperties.getProperty(RecipeToolOptions.CLUSTER_VALIDITY_END.getName());
-        if (StringUtils.isNotEmpty(clusterEndValidity)) {
-            cluster.getValidity().setEnd(SchemaHelper.parseDateUTC(clusterEndValidity));
-        }
-    }
-
-    private static void bindWorkflowProperties(final Workflow wf,
-                                               final Properties recipeProperties) {
-        String wfName = recipeProperties.getProperty(RecipeToolOptions.WORKFLOW_NAME.getName());
-        if (StringUtils.isNotEmpty(wfName)) {
-            wf.setName(wfName);
-        }
-
-        String wfLibPath = recipeProperties.getProperty(RecipeToolOptions.WORKFLOW_LIB_PATH.getName());
-        if (StringUtils.isNotEmpty(wfLibPath)) {
-            wf.setLib(wfLibPath);
-        } else if (wf.getLib().startsWith("##")) {
-            wf.setLib("");
-        }
-
-        String wfPath = recipeProperties.getProperty(RecipeToolOptions.WORKFLOW_PATH.getName());
-        if (StringUtils.isNotEmpty(wfPath)) {
-            wf.setPath(wfPath);
-        }
-    }
-
-    private static void bindRetryProperties(final Retry processRetry,
-                                            final Properties recipeProperties) {
-        String retryPolicy = recipeProperties.getProperty(RecipeToolOptions.RETRY_POLICY.getName());
-        if (StringUtils.isNotEmpty(retryPolicy)) {
-            processRetry.setPolicy(PolicyType.fromValue(retryPolicy));
-        }
-
-        String retryAttempts = recipeProperties.getProperty(RecipeToolOptions.RETRY_ATTEMPTS.getName());
-        if (StringUtils.isNotEmpty(retryAttempts)) {
-            processRetry.setAttempts(Integer.parseInt(retryAttempts));
-        }
-
-        String retryDelay = recipeProperties.getProperty(RecipeToolOptions.RETRY_DELAY.getName());
-        if (StringUtils.isNotEmpty(retryDelay)) {
-            processRetry.setDelay(Frequency.fromString(retryDelay));
-        }
-
-        String retryOnTimeout = recipeProperties.getProperty(RecipeToolOptions.RETRY_ON_TIMEOUT.getName());
-        if (StringUtils.isNotEmpty(retryOnTimeout)) {
-            processRetry.setOnTimeout(Boolean.valueOf(retryOnTimeout));
-        }
-    }
-
-    private static void bindNotificationProperties(final Notification processNotification,
-                                                   final Properties recipeProperties) {
-        processNotification.setType(recipeProperties.getProperty(
-                RecipeToolOptions.RECIPE_NOTIFICATION_TYPE.getName()));
-
-        String notificationAddress = recipeProperties.getProperty(
-                RecipeToolOptions.RECIPE_NOTIFICATION_ADDRESS.getName());
-        if (StringUtils.isNotBlank(notificationAddress)) {
-            processNotification.setTo(notificationAddress);
-        } else {
-            processNotification.setTo("NA");
-        }
-    }
-
-    private static void bindACLProperties(final ACL acl,
-                                          final Properties recipeProperties) {
-        String aclowner = recipeProperties.getProperty(RecipeToolOptions.RECIPE_ACL_OWNER.getName());
-        if (StringUtils.isNotEmpty(aclowner)) {
-            acl.setOwner(aclowner);
-        }
-
-        String aclGroup = recipeProperties.getProperty(RecipeToolOptions.RECIPE_ACL_GROUP.getName());
-        if (StringUtils.isNotEmpty(aclGroup)) {
-            acl.setGroup(aclGroup);
-        }
-
-        String aclPermission = recipeProperties.getProperty(RecipeToolOptions.RECIPE_ACL_PERMISSION.getName());
-        if (StringUtils.isNotEmpty(aclPermission)) {
-            acl.setPermission(aclPermission);
-        }
-    }
-
-    private static void bindTagsProperties(final org.apache.falcon.entity.v0.process.Process process,
-                                           final Properties recipeProperties) {
-        String falconSystemTags = process.getTags();
-        String tags = recipeProperties.getProperty(RecipeToolOptions.RECIPE_TAGS.getName());
-        if (StringUtils.isNotEmpty(tags)) {
-            if (StringUtils.isNotEmpty(falconSystemTags)) {
-                tags += ", " + falconSystemTags;
-            }
-            process.setTags(tags);
-        }
-    }
-
-
-    private static void bindCustomProperties(final org.apache.falcon.entity.v0.process.Properties customProperties,
-                                             final Properties recipeProperties) {
-        List<Property> propertyList = new ArrayList<>();
-
-        for (Map.Entry<Object, Object> recipeProperty : recipeProperties.entrySet()) {
-            if (RecipeToolOptions.getOptionsMap().get(recipeProperty.getKey().toString()) == null) {
-                addProperty(propertyList, (String) recipeProperty.getKey(), (String) recipeProperty.getValue());
-            }
-        }
-
-        customProperties.getProperties().addAll(propertyList);
-    }
-
-    private static void addProperty(List<Property> propertyList, String name, String value) {
-        Property prop = new Property();
-        prop.setName(name);
-        prop.setValue(value);
-        propertyList.add(prop);
-    }
-
-    private static String createProcessXmlFile(final String outFilename,
-                                               final Entity entity) throws Exception {
-        if (outFilename == null || entity == null) {
-            throw new IllegalArgumentException("Invalid arguments passed");
-        }
-
-        EntityType type = EntityType.PROCESS;
-        OutputStream out = null;
-        try {
-            out = new FileOutputStream(outFilename);
-            type.getMarshaller().marshal(entity, out);
-        } catch (JAXBException e) {
-            throw new Exception("Unable to serialize the entity object " + type + "/" + entity.getName(), e);
-        } finally {
-            IOUtils.closeQuietly(out);
-        }
-        return outFilename;
-    }
-
-    private static void validateProcessXmlFile(final String processFileName) throws Exception {
-        if (processFileName == null) {
-            throw new IllegalArgumentException("Invalid arguments passed");
-        }
-
-        String line;
-        BufferedReader reader = null;
-
-        try {
-            reader = new BufferedReader(new FileReader(processFileName));
-            while ((line = reader.readLine()) != null) {
-                Matcher matcher = RECIPE_VAR_PATTERN.matcher(line);
-                if (matcher.find()) {
-                    String variable = line.substring(matcher.start(), matcher.end());
-                    throw new Exception("Match not found for the template: " + variable
-                            + " in recipe template file. Please add it in recipe properties file");
-                }
-            }
-        } finally {
-            IOUtils.closeQuietly(reader);
-        }
-
-    }
-
-}
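
A compact sketch of the unresolved-placeholder check performed by validateProcessXmlFile above, using the same ##...## pattern; the file path, class and method names are placeholders.

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class PlaceholderCheck {
    // Same pattern the builder uses for recipe template variables, e.g. ##falcon.recipe.name##.
    private static final Pattern RECIPE_VAR_PATTERN = Pattern.compile("##[A-Za-z0-9_.]*##");

    /** Fails if the generated process xml still contains an unresolved ##...## placeholder. */
    public static void assertResolved(String processXmlPath) throws IOException {
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(processXmlPath), StandardCharsets.UTF_8)) {
            String line;
            while ((line = reader.readLine()) != null) {
                Matcher matcher = RECIPE_VAR_PATTERN.matcher(line);
                if (matcher.find()) {
                    throw new IOException("Unresolved template variable "
                            + line.substring(matcher.start(), matcher.end())
                            + "; add the corresponding key to the recipe properties file");
                }
            }
        }
    }

    private PlaceholderCheck() {
    }
}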

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/APIResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/APIResult.java b/client/src/main/java/org/apache/falcon/resource/APIResult.java
deleted file mode 100644
index e67eb3a..0000000
--- a/client/src/main/java/org/apache/falcon/resource/APIResult.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.io.StringWriter;
-
-/**
- * APIResult is the output returned by all the APIs; status - SUCCEEDED, PARTIAL or FAILED,
- * message - detailed message.
- */
-@XmlRootElement(name = "result")
-@XmlAccessorType(XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class APIResult {
-
-    private Status status;
-
-    private String message;
-
-    private String requestId;
-
-    private static final JAXBContext JAXB_CONTEXT;
-
-    static {
-        try {
-            JAXB_CONTEXT = JAXBContext.newInstance(APIResult.class);
-        } catch (JAXBException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /**
-     * API Result status.
-     */
-    public static enum Status {
-        SUCCEEDED, PARTIAL, FAILED
-    }
-
-    public APIResult(Status status, String message) {
-        super();
-        this.status = status;
-        this.message = message;
-        requestId = Thread.currentThread().getName();
-    }
-
-    protected APIResult() {
-        // protected default constructor for JAXB
-    }
-
-    public Status getStatus() {
-        return status;
-    }
-
-    public String getMessage() {
-        return message;
-    }
-
-    public String getRequestId() {
-        return requestId;
-    }
-
-    public void setRequestId(String reqId) {
-        this.requestId = reqId;
-    }
-
-    @Override
-    public String toString() {
-        try {
-            StringWriter stringWriter = new StringWriter();
-            Marshaller marshaller = JAXB_CONTEXT.createMarshaller();
-            marshaller.marshal(this, stringWriter);
-            return stringWriter.toString();
-        } catch (JAXBException e) {
-            return e.getMessage();
-        }
-    }
-
-    public Object[] getCollection() {
-        return null;
-    }
-
-    public void setCollection(Object[] items) {
-    }
-}
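
A minimal stand-alone sketch of the JAXB marshal-to-string pattern used by APIResult.toString(); it assumes javax.xml.bind is available (bundled with Java 8, a separate dependency on newer JDKs), and the class below is a placeholder rather than the real APIResult.

import java.io.StringWriter;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

/** Minimal stand-in for APIResult to show the marshal-to-string pattern. */
@XmlRootElement(name = "result")
@XmlAccessorType(XmlAccessType.FIELD)
public class ResultSketch {
    private String status = "SUCCEEDED";
    private String message = "demo";

    public static void main(String[] args) throws JAXBException {
        Marshaller marshaller = JAXBContext.newInstance(ResultSketch.class).createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        StringWriter writer = new StringWriter();
        marshaller.marshal(new ResultSketch(), writer);   // serialize the object to XML
        System.out.println(writer);
    }
}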


[19/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceList.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceList.twiki b/docs/src/site/twiki/restapi/InstanceList.twiki
deleted file mode 100644
index 214c22f..0000000
--- a/docs/src/site/twiki/restapi/InstanceList.twiki
+++ /dev/null
@@ -1,151 +0,0 @@
----++  GET /api/instance/list/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get list of all instances of a given entity.
-
----++ Parameters
-   * :entity-type Valid options are cluster, feed or process.
-   * :entity-name Name of the entity.
-   * start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * By default, it is set to (end - (10 * entityFrequency)).
-   * end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * Default is set to now.
-   * colo <optional param> Colo on which the query should be run.
-   * lifecycle <optional param> Valid lifecycles for a feed are Eviction and Replication (default); for a process it is Execution (default).
-   * filterBy <optional param>  Filter results by list of field:value pairs. Example: filterBy=STATUS:RUNNING,CLUSTER:primary-cluster
-      * Supported filter fields are STATUS, CLUSTER, SOURCECLUSTER, STARTEDAFTER.
-      * Query will do an AND among filterBy fields.
-   * orderBy <optional param> Field by which results should be ordered.
-      * Supports ordering by  "status","startTime","endTime","cluster".
-   * sortOrder <optional param> Valid options are "asc" and "desc".
-   * offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-   * numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0 are valid; default is 10.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-   * allAttempts <optional query param> To get all the attempts for corresponding instances.
-   
----++ Results
-List of instances of given entity.
-
-The possible instance status returned and its meaning are as follows:
-   * WAITING - The instance is waiting for the corresponding data(feed) instances to become available.
-   * READY - The instance is ready to be scheduled. But, is waiting for scheduling conditions to be met. For example, limitation on number of instances that can be run in parallel.
-   * RUNNING - The instance is running on the workflow engine.
-   * FAILED - The instance has failed during execution.
-   * KILLED - The instance has been killed either manually or by the system.
-   * SUCCEEDED - The instance has executed successfully.
-   * SKIPPED - This instance was not executed, but was skipped. For example, when the execution order is LAST_ONLY, the older instances are skipped.
-   * ERROR - There was an error while executing this instance on the workflow engine.
-   * UNDEFINED - The status of the instance could not be determined.
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/list/process/SampleProcess?colo=*&start=2012-04-03T07:00Z
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:40:26-07:00",
-            "startTime": "2013-10-21T14:39:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/e15bb378-d09f-4911-9df2-5334a45153d2\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/list/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&filterBy=STATUS:SUCCEEDED,CLUSTER:primary-cluster&orderBy=startTime&offset=2&numResults=2&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:40:26-07:00",
-            "startTime": "2013-10-21T14:39:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T07:00Z"
-        },
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:42:26-07:00",
-            "startTime": "2013-10-21T14:41:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933397-oozie-rgau-W",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T08:00Z"
-        }
-    ],
-
-    "requestId": "default\/e15bb378-d09f-4911-9df2-5334a45153d2\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-
----+++ Rest Call
-<verbatim>
-GET https://localhost:15443/api/instance/list/process/oozie-mr-process?user.name=narayan&start=2013-11-15T00:05Z&end=2013-11-15T01:00Z&colo=*&offset=0&allAttempts=true
-</verbatim>
----+++ Result
-<verbatim>
-{
-   "status":"SUCCEEDED",
-   "message":"default/STATUS\n",
-   "requestId":"default/942519651@qtp-1386909980-16 - 5b11a8ba-402b-4cc7-969c-256e0ed18ae2\n",
-   "instances":[
-      {
-         "instance":"2013-11-15T00:05Z",
-         "status":"SUCCEEDED",
-         "logFile":"http://IM1948-X0:11000/oozie?job=0000010-160106121750678-oozie-oozi-W",
-         "cluster":"local",
-         "startTime":"2016-01-06T12:39:22+05:30",
-         "endTime":"2016-01-06T12:40:05+05:30",
-         "runId":0,
-         "details":"",
-         "actions":[
-            {
-               "action":"mr-node",
-               "status":"SUCCEEDED",
-               "logFile":"http://localhost:8088/proxy/application_1452062826344_0010/"
-            }
-         ]
-      },
-      {
-         "instance":"2013-11-15T00:05Z",
-         "status":"SUCCEEDED",
-         "logFile":"http://IM1948-X0:11000/oozie?job=0000011-160106121750678-oozie-oozi-W",
-         "cluster":"local",
-         "startTime":"2016-01-06T12:40:27+05:30",
-         "endTime":"2016-01-06T12:41:05+05:30",
-         "runId":0,
-         "details":"",
-         "actions":[
-            {
-               "action":"mr-node",
-               "status":"SUCCEEDED",
-               "logFile":"http://localhost:8088/proxy/application_1452062826344_0012/"
-            }
-         ]
-      }
-   ]
-}
-</verbatim>
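
A client-side sketch of the list call documented above, using plain HttpURLConnection; the host, port and entity name are placeholders taken from the examples, not required values.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public final class ListInstances {
    public static void main(String[] args) throws Exception {
        // Placeholder host and entity; adjust to your Falcon server and process name.
        String url = "http://localhost:15000/api/instance/list/process/SampleProcess"
                + "?colo=*&start=2012-04-03T07:00Z&orderBy=startTime&offset=0&numResults=10";
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestMethod("GET");
        conn.setRequestProperty("Accept", "application/json");
        System.out.println("HTTP " + conn.getResponseCode());
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);   // JSON instance list as shown in the examples above
            }
        }
        conn.disconnect();
    }
}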

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceLogs.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceLogs.twiki b/docs/src/site/twiki/restapi/InstanceLogs.twiki
deleted file mode 100644
index 1e1c98d..0000000
--- a/docs/src/site/twiki/restapi/InstanceLogs.twiki
+++ /dev/null
@@ -1,113 +0,0 @@
----++ GET /api/instance/logs/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get log of a specific instance of an entity.
-
----++ Parameters
-   * :entity-type Valid options are cluster, feed or process.
-   * :entity-name Name of the entity.
-   * start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * By default, it is set to (end - (10 * entityFrequency)).
-   * end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * Default is set to now.
-   * colo <optional param> Colo on which the query should be run.
-   * runId <optional param> Run Id.
-   * lifecycle <optional param> Valid lifecycles for a feed are Eviction and Replication (default); for a process it is Execution (default).
-   * filterBy <optional param>  Filter results by list of field:value pairs. Example: filterBy=STATUS:RUNNING,CLUSTER:primary-cluster
-      * Supported filter fields are STATUS, CLUSTER, SOURCECLUSTER, STARTEDAFTER.
-      * Query will do an AND among filterBy fields.
-   * orderBy <optional param> Field by which results should be ordered.
-      * Supports ordering by "status","startTime","endTime","cluster".
-   * sortOrder <optional param> Valid options are "asc" and "desc".
-   * offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-   * numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0 are valid; default is 10.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Log of specified instance.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/logs/process/SampleProcess?colo=*&start=2012-04-03T07:00Z
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "actions": [
-                {
-                    "logFile": "http:\/\/localhost:50070\/data\/apps\/falcon\/staging\/falcon\/workflows\/process\/SampleProcess\/logs\/job-2012-04-03-07-00\/000\/pig_SUCCEEDED.log",
-                    "status": "SUCCEEDED",
-                    "action": "pig"
-                }
-            ],
-            "details": "",
-            "endTime": "2013-10-21T14:40:26-07:00",
-            "startTime": "2013-10-21T14:39:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:50070\/data\/apps\/falcon\/staging\/falcon\/workflows\/process\/SampleProcess\/logs\/job-2012-04-03-07-00\/000\/oozie.log",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/3527038e-8334-4e50-8173-76c4fa430d0b\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/logs/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&filterBy=STATUS:SUCCEEDED,CLUSTER:primary-cluster&orderBy=startTime&offset=2&numResults=2&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "actions": [
-                {
-                    "logFile": "http:\/\/localhost:50070\/data\/apps\/falcon\/staging\/falcon\/workflows\/process\/SampleProcess\/logs\/job-2012-04-03-07-00\/000\/pig_SUCCEEDED.log",
-                    "status": "SUCCEEDED",
-                    "action": "pig"
-                }
-            ],
-            "details": "",
-            "endTime": "2013-10-21T14:40:26-07:00",
-            "startTime": "2013-10-21T14:39:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:50070\/data\/apps\/falcon\/staging\/falcon\/workflows\/process\/SampleProcess\/logs\/job-2012-04-03-07-00\/000\/oozie.log",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T07:00Z"
-        },
-        {
-            "actions": [
-                {
-                    "logFile": "http:\/\/localhost:50070\/data\/apps\/falcon\/staging\/falcon\/workflows\/process\/SampleProcess\/logs\/job-2012-04-03-07-00\/001\/pig_SUCCEEDED.log",
-                    "status": "SUCCEEDED",
-                    "action": "pig"
-                }
-            ],
-            "details": "",
-            "endTime": "2013-10-21T14:42:27-07:00",
-            "startTime": "2013-10-21T14:41:57-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:50070\/data\/apps\/falcon\/staging\/falcon\/workflows\/process\/SampleProcess\/logs\/job-2012-04-03-07-00\/001\/oozie.log",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T08:00Z"
-        }
-    ],
-    "requestId": "default\/3527038e-8334-4e50-8173-76c4fa430d0b\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-
-
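
A small sketch that issues the logs call above and pulls the logFile URLs out of the JSON with a plain regex (no JSON library); host and entity name are placeholders, and a real client would normally use a proper JSON parser.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ListInstanceLogs {
    public static void main(String[] args) throws Exception {
        // Placeholder host and entity; the endpoint and params follow the description above.
        String url = "http://localhost:15000/api/instance/logs/process/SampleProcess"
                + "?colo=*&start=2012-04-03T07:00Z";
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestProperty("Accept", "application/json");
        StringBuilder body = new StringBuilder();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                body.append(line);
            }
        }
        conn.disconnect();
        // Crude extraction of the "logFile" URLs from the response body.
        Matcher m = Pattern.compile("\"logFile\"\\s*:\\s*\"([^\"]+)\"").matcher(body);
        while (m.find()) {
            System.out.println(m.group(1).replace("\\/", "/"));
        }
    }
}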

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceParams.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceParams.twiki b/docs/src/site/twiki/restapi/InstanceParams.twiki
deleted file mode 100644
index 7a340a5..0000000
--- a/docs/src/site/twiki/restapi/InstanceParams.twiki
+++ /dev/null
@@ -1,83 +0,0 @@
----++  GET /api/instance/params/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get the params passed to the workflow for an instance of feed/process.
-
----++ Parameters
-   * :entity-type Valid options are cluster, feed or process.
-   * :entity-name Name of the entity.
-   * start should be the nominal time of the instance for which you want the params to be returned
-   * colo <optional param> Colo on which the query should be run.
-   * lifecycle <optional param> Valid lifecycles for a feed are Eviction and Replication (default); for a process it is Execution (default).
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
-
----++ Results
-Params passed to the workflow for the specified instance.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-http://userqa.user.com:16000/api/instance/params/process/Sample-Process?start=2014-10-01T11:00Z&colo=*&doAs=joe
-</verbatim>
----+++ Result
-{
-    "status": "SUCCEEDED",
-    "message": "ua1/PARAMS\n",
-    "requestId": "ua1/807e9fe6-ba60-490e-b720-f8dc8b92063e\n",
-    "instances": [
-        {
-            "instance": "2014-10-01T11:00Z",
-            "status": "RUNNING",
-            "logFile": "http://spyke.user.com:11000/oozie?job=0000211-141117203201940-oozie-oozi-W",
-            "cluster": "sample-cluster",
-            "startTime": "2014-11-19T19:46:29+08:00",
-            "details": "",
-            "actions": [
-                {
-                    "action": "succeeded-post-processing",
-                    "status": "RUNNING",
-                    "logFile": "http://spyke.user.com:50030/jobdetails.jsp?jobid=job_201411071450_1052"
-                }
-            ],
-            "params": {
-                "entry": {"key": "jobTracker", "value": "10.16.114.113:8021"},
-                "entry":{"key":"falconInputNames","value":"IGNORE"},
-                "entry":{"key":"shouldRecord","value":"false"},
-                "entry":{"key":"timeStamp","value":"2014-11-19-11-46"},
-                "entry":{"key":"falconInPaths","value":"IGNORE"},
-                "entry":{"key":"broker.url","value":"tcp://localhost:61616"},
-                "entry":{"key":"feedNames","value":"NONE"},
-                "entry":{"key":"falcon.libpath","value":"/path/falcon/sample/lib"},
-                "entry":{"key":"ENTITY_PATH","value":"/path/falcon/staging/falcon/workflows/process/Sample-Process/9506be19980e0e6fdb709e1baffff_1416397585511/DEFAULT"},
-                "entry":{"key":"entityType","value":"process"},
-                "entry":{"key":"nominalTime","value":"2014-10-01-11-00"},
-                "entry":{"key":"feedInstancePaths","value":"IGNORE"},
-                "entry":{"key":"oozie.bundle.application.path","value":"hdfs://10.16.104.13:8020/path/falcon/staging/falcon/workflows/process/Sample-Process/9506be19980e0e669709e1baffff_1416397585511"},
-                "entry":{"key":"logDir","value":"hdfs://10.16.104.13:8020/path/falcon/staging/falcon/workflows/process/Sample-Process/logs"},
-                "entry":{"key":"userWorkflowEngine","value":"oozie"},
-                "entry":{"key":"broker.ttlInMins","value":"4320"},
-                "entry":{"key":"oozie.use.system.libpath","value":"true"},
-                "entry":{"key":"queueName","value":"reports"},
-                "entry":{"key":"falconDataOperation","value":"GENERATE"},
-                "entry":{"key":"oozie.wf.external.id","value":"Sample-Process/DEFAULT/2014-10-01T11:00Z"},
-                "entry":{"key":"workflowEngineUrl","value":"http://10.11.100.10:11000/oozie/"},
-                "entry":{"key":"userBrokerImplClass","value":"org.apache.activemq.ActiveMQConnectionFactory"},
-                "entry":{"key":"ENTITY_NAME","value":"FALCON_PROCESS_DEFAULT_Sample-Process"},
-                "entry":{"key":"broker.impl.class","value":"org.apache.activemq.ActiveMQConnectionFactory"},
-                "entry":{"key":"userWorkflowName","value":"Sample-workflow"},
-                "entry":{"key":"entityName","value":"Sample-Process"},
-                "entry":{"key":"srcClusterName","value":"NA"},
-                "entry":{"key":"userBrokerUrl","value":"tcp://localhost:61616?daemon=true"},
-                "entry":{"key":"falconInputFeeds","value":"NONE"},
-                "entry":{"key":"user.name","value":"sampleuser"},
-                "entry":{"key":"threedaysback","value":"2014-09-28"},
-                "entry":{"key":"userWorkflowVersion","value":"1.0"}
-            }
-        }
-    ]
-}
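
A short sketch of the params call above; note that start must be the nominal time of the instance and is URL-encoded here. Host and entity name are placeholders.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public final class GetInstanceParams {
    public static void main(String[] args) throws Exception {
        // Placeholder host and entity; 'start' must be the nominal time of the instance.
        String nominalTime = "2014-10-01T11:00Z";
        String url = "http://localhost:15000/api/instance/params/process/Sample-Process"
                + "?colo=*&start=" + URLEncoder.encode(nominalTime, StandardCharsets.UTF_8.name());
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);   // params map as in the example above
            }
        }
        conn.disconnect();
    }
}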

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceRerun.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceRerun.twiki b/docs/src/site/twiki/restapi/InstanceRerun.twiki
deleted file mode 100644
index eef0e1a..0000000
--- a/docs/src/site/twiki/restapi/InstanceRerun.twiki
+++ /dev/null
@@ -1,66 +0,0 @@
----++  POST /api/instance/rerun/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Rerun instances of an entity. On issuing a rerun, by default the execution resumes from the last failed node in the workflow.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * start is the start time of the instance that you want to refer to
-   * end is the end time of the instance that you want to refer to
-   * lifecycle <optional param> can be Eviction/Replication(default) for feed and Execution(default) for process.
-   * force <optional param> can be used to forcefully rerun the entire instance.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Results of the rerun command.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/instance/rerun/process/SampleProcess?colo=*&start=2013-04-03T07:00Z&end=2014-04-03T07:00Z
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "startTime": "2013-10-21T15:10:47-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "RUNNING",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/7a3582bd-608c-45a7-9b74-1837b51ba6d5\n",
-    "message": "default\/RERUN\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-<verbatim>
-POST http://localhost:15000/api/instance/rerun/process/SampleProcess?colo=*&start=2013-04-03T07:00Z&end=2014-04-03T07:00Z&force=true&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "startTime": "2013-10-21T15:10:47-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "RUNNING",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/7a3582bd-608c-45a7-9b74-1837b51ba6d5\n",
-    "message": "default\/RERUN\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
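
A minimal sketch of triggering the rerun call above over HTTP POST; host, entity and time window are placeholders, and all inputs travel as query parameters.

import java.net.HttpURLConnection;
import java.net.URL;

public final class RerunInstances {
    public static void main(String[] args) throws Exception {
        // Placeholder host, entity and window; force=true reruns the entire instance as described above.
        String url = "http://localhost:15000/api/instance/rerun/process/SampleProcess"
                + "?colo=*&start=2013-04-03T07:00Z&end=2014-04-03T07:00Z&force=true";
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Accept", "application/json");
        conn.setDoOutput(true);
        conn.getOutputStream().close();   // empty body; all inputs are query parameters
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
    }
}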

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceResume.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceResume.twiki b/docs/src/site/twiki/restapi/InstanceResume.twiki
deleted file mode 100644
index 1254785..0000000
--- a/docs/src/site/twiki/restapi/InstanceResume.twiki
+++ /dev/null
@@ -1,43 +0,0 @@
----++  POST /api/instance/resume/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Resume suspended instances of an entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * start is the start time of the instance(s) that you want to refer to
-   * end is the end time of the instance(s) that you want to refer to
-   * lifecycle <optional param> can be Eviction/Replication(default) for feed and Execution(default) for process.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Results of the resume command.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/instance/resume/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&end=2014-04-03T07:00Z&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "startTime": "2013-10-21T15:19:57-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "RUNNING",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/e88ff2e0-2af7-4829-a360-f92e95be2981\n",
-    "message": "default\/RESUME\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
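
A minimal sketch of issuing the resume call above and doing a crude check of the overall status field in the response; host, entity, window and the string-based check are illustrative only.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public final class ResumeInstances {
    public static void main(String[] args) throws Exception {
        // Placeholder host, entity and window, matching the example call above.
        String url = "http://localhost:15000/api/instance/resume/process/SampleProcess"
                + "?colo=*&start=2012-04-03T07:00Z&end=2014-04-03T07:00Z";
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Accept", "application/json");
        StringBuilder body = new StringBuilder();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                body.append(line);
            }
        }
        conn.disconnect();
        // Crude substring check; a real client would parse the JSON and inspect "status".
        if (!body.toString().contains("SUCCEEDED")) {
            System.err.println("Resume did not succeed: " + body);
            System.exit(1);
        }
        System.out.println(body);
    }
}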

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceRunning.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceRunning.twiki b/docs/src/site/twiki/restapi/InstanceRunning.twiki
deleted file mode 100644
index 3d1cabc..0000000
--- a/docs/src/site/twiki/restapi/InstanceRunning.twiki
+++ /dev/null
@@ -1,84 +0,0 @@
----++  GET /api/instance/running/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get a list of instances currently running for a given entity.
-
----++ Parameters
-   * :entity-type Valid options are cluster, feed or process.
-   * :entity-name Name of the entity.
-   * colo <optional param> Colo on which the query should be run.
-   * lifecycle <optional param> Valid lifecycles for a feed are Eviction and Replication (default); for a process it is Execution (default).
-   * filterBy <optional param>  Filter results by list of field:value pairs. Example: filterBy=CLUSTER:primary-cluster
-      * Supported filter fields are CLUSTER, SOURCECLUSTER, STARTEDAFTER.
-      * Query will do an AND among filterBy fields.
-   * orderBy <optional param> Field by which results should be ordered.
-      * Supports ordering by "status","startTime","endTime","cluster".
-   * sortOrder <optional param> Valid options are "asc" and "desc".
-   * offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-   * numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0 are valid; default is 10.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-List of instances currently running.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/running/process/SampleProcess?colo=*
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "startTime": "2013-10-21T14:39:28-07:00",
-            "cluster": "primary-cluster",
-            "status": "RUNNING",
-            "instance": "2012-04-03T06:00Z"
-        }
-    ],
-    "requestId": "default\/12e9a7d4-3b4f-4a76-b471-c8f3786a62a0\n",
-    "message": "default\/Running Instances\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/running/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&filterBy=CLUSTER:primary-cluster&orderBy=startTime&offset=2&numResults=2&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:40:26-07:00",
-            "startTime": "2013-10-21T14:39:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "RUNNING",
-            "instance": "2012-04-03T07:00Z"
-        },
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:42:27-07:00",
-            "startTime": "2013-10-21T14:41:57-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933397-oozie-rgau-W",
-            "status": "RUNNING",
-            "instance": "2012-04-03T08:00Z"
-        }
-    ],
-
-    "requestId": "default\/e15bb378-d09f-4911-9df2-5334a45153d2\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
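
Purely as an illustration, the running-instances call documented above could also be issued with curl under simple (Pseudo) authentication; the user name is a placeholder, and the host, port and query parameters are reused from the examples:

<verbatim>
curl -i "http://localhost:15000/api/instance/running/process/SampleProcess?user.name=<USER>&colo=*&filterBy=CLUSTER:primary-cluster&orderBy=startTime&numResults=5"
</verbatim>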

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceStatus.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceStatus.twiki b/docs/src/site/twiki/restapi/InstanceStatus.twiki
deleted file mode 100644
index 2b7b643..0000000
--- a/docs/src/site/twiki/restapi/InstanceStatus.twiki
+++ /dev/null
@@ -1,98 +0,0 @@
----++  GET /api/instance/status/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get status of a specific instance of an entity.
-
----++ Parameters
-   * :entity-type Valid options are cluster, feed or process.
-   * :entity-name Name of the entity.
-   * start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * By default, it is set to (end - (10 * entityFrequency)).
-   * end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * Default is set to now.
-   * colo <optional param> Colo on which the query should be run.
-   * lifecycle <optional param> Valid lifecycles for feed are Eviction/Replication (default) and for process it is Execution (default).
-   * filterBy <optional param>  Filter results by list of field:value pairs. Example: filterBy=STATUS:RUNNING,CLUSTER:primary-cluster
-      * Supported filter fields are STATUS, CLUSTER, SOURCECLUSTER, STARTEDAFTER.
-      * Query will do an AND among filterBy fields.
-   * orderBy <optional param> Field by which results should be ordered.
-      * Supports ordering by "status","startTime","endTime","cluster".
-   * sortOrder <optional param> Valid options are "asc" and "desc"
-   * offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-   * numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0 are valid. Default is 10.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-   * allAttempts <optional query param> Returns all attempts for the corresponding instances.
-   
----++ Results
-Status of the specified instance, along with job URLs for all actions of the user workflow and for non-succeeded actions of the main workflow.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET https://localhost:15443/api/instance/status/process/WordCount?start=2014-11-04T16:00Z&colo=*
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2014-11-05T16:08:10+05:30",
-            "startTime": "2014-11-05T16:07:29+05:30",
-            "cluster": "local",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000011-141105155430303-oozie-oozi-W",
-            "status": "SUCCEEDED",
-            "instance": "2014-11-04T16:00Z",
-            "actions": [
-                {
-                    "action": "wordcount-mr",
-                    "status": "SUCCEEDED",
-                    "logFile": "http:\/\/localhost:50030\/jobdetails.jsp?jobid=job_201411051553_0005"
-                }
-            ]
-        }
-    ],
-    "requestId": "default\/b9fc3cba-1b46-4d1f-8196-52c795ea3580\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/status/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&filterBy=STATUS:SUCCEEDED,CLUSTER:primary-cluster&orderBy=startTime&offset=2&numResults=2&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:40:26-07:00",
-            "startTime": "2013-10-21T14:39:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T07:00Z"
-        },
-        {
-            "details": "",
-            "endTime": "2013-10-21T14:42:26-07:00",
-            "startTime": "2013-10-21T14:41:56-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933397-oozie-rgau-W",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T08:00Z"
-        }
-    ],
-
-    "requestId": "default\/e15bb378-d09f-4911-9df2-5334a45153d2\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
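
As a sketch only: on a secure (Kerberos) deployment, the same status call could be made with curl via SPNEGO, as described under the REST resource list; allAttempts=true is added here to fetch every attempt of each instance, and the host, port and entity name are placeholders taken from the example above:

<verbatim>
curl -i --negotiate -u : "https://localhost:15443/api/instance/status/process/WordCount?start=2014-11-04T16:00Z&colo=*&allAttempts=true"
</verbatim>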

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceSummary.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceSummary.twiki b/docs/src/site/twiki/restapi/InstanceSummary.twiki
deleted file mode 100644
index 0e1ffee..0000000
--- a/docs/src/site/twiki/restapi/InstanceSummary.twiki
+++ /dev/null
@@ -1,114 +0,0 @@
----++  GET /api/instance/summary/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get a summary of the instances of an entity.
-
----++ Parameters
-   * :entity-type Valid options are feed or process.
-   * :entity-name Name of the entity.
-   * start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-       * By default, it is set to (end - (10 * entityFrequency)).
-   * end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-       * Default is set to now.
-   * colo <optional param> Colo on which the query should be run.
-   * lifecycle <optional param> Valid lifecycles for feed are Eviction/Replication (default) and for process it is Execution (default).
-   * filterBy <optional param>  Filter results by list of field:value pairs.
-   Example1: filterBy=STATUS:RUNNING,CLUSTER:primary-cluster
-   Example2: filterBy=Status:RUNNING,Status:KILLED
-       * Supported filter fields are STATUS, CLUSTER
-       * Query will do an AND among filterBy fields.
-   * orderBy <optional param> Field by which results should be ordered.
-       * Supports ordering by "cluster".
-   * sortOrder <optional param> Valid options are "asc" and "desc"
-   Example: orderBy=cluster sortOrder=asc
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Summary of the instances over the specified time range
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/summary/process/WordCount?colo=*&start=2014-01-21T13:00Z&end=2014-01-21T16:00Z
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "status":"SUCCEEDED",
-    "message":"default/SUMMARY\n",
-    "requestId":"default/c344567b-da73-44d5-bcd4-bf456524934c\n",
-    "instancesSummary":
-        {
-            "cluster":"local",
-            "map":
-                {
-                    "entry":
-                        {
-                            "key":"SUCCEEDED",
-                            "value":"value"
-                        }
-                }
-        }
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET https://localhost:16443/api/instance/summary/process/WordCount?filterBy=Status:KILLED,Status:RUNNING&start=2015-06-24T16:00Z&end=2015-06-24T23:00Z&colo=*
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "status":"SUCCEEDED",
-    "message":"local/SUMMARY\n",
-    "requestId":"local/1246061948@qtp-1059149611-5 - 34d8c3bb-f461-4fd5-87cd-402c9c6b1ed2\n",
-    "instancesSummary":[
-        {
-            "cluster":"local",
-            "map":{
-                "entry":{
-                    "key":"RUNNING",
-                    "value":"1"
-                },
-                "entry":{
-                    "key":"KILLED",
-                    "value":"1"
-                }
-            }
-        }
-    ]
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET https://localhost:16443/api/instance/summary/process/WordCount?orderBy=cluster&sortOrder=asc&start=2015-06-24T16:00Z&end=2015-06-24T23:00Z&colo=*&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "status":"SUCCEEDED",
-    "message":"local/SUMMARY\n",
-    "requestId":"local/1246061948@qtp-1059149611-5 - 42e2040d-6b6e-4bfd-a090-83db5ed1a429\n",
-    "instancesSummary":[
-        {
-            "cluster":"local",
-            "map":{
-                "entry":{
-                    "key":"SUCCEEDED",
-                    "value":"6"
-                },
-                "entry":{
-                    "key":"KILLED",
-                    "value":"1"
-                }
-            }
-        }
-    ]
-}
-</verbatim>
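
For illustration only, the summary endpoint above could also be invoked with curl under simple (Pseudo) authentication; the user name is a placeholder, and the host, port, time range and filter are reused from the parameters documented above:

<verbatim>
curl -i "http://localhost:15000/api/instance/summary/process/WordCount?user.name=<USER>&start=2014-01-21T13:00Z&end=2014-01-21T16:00Z&colo=*&filterBy=STATUS:RUNNING"
</verbatim>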

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceSuspend.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceSuspend.twiki b/docs/src/site/twiki/restapi/InstanceSuspend.twiki
deleted file mode 100644
index 2ba8663..0000000
--- a/docs/src/site/twiki/restapi/InstanceSuspend.twiki
+++ /dev/null
@@ -1,44 +0,0 @@
----++  POST /api/instance/suspend/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Suspend instances of an entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * start is the start time of the instance(s) that you want to refer to
-   * end is the end time of the instance(s) that you want to refer to
-   * lifecycle <optional param> can be Eviction/Replication(default) for feed and Execution(default) for process.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Results of the suspend command.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/instance/suspend/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&end=2014-04-03T07:00Z&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2013-10-21T15:15:01-07:00",
-            "startTime": "2013-10-21T15:14:32-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "SUCCEEDED",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/ff07e45b-b6da-4f47-ae96-9182bd8a7e53\n",
-    "message": "default\/SUSPEND\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
\ No newline at end of file
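
For illustration only: since suspend is a POST resource, a curl invocation needs an explicit request method. The sketch below assumes simple (Pseudo) authentication and reuses the host, port and time range from the example above, with the user name as a placeholder:

<verbatim>
curl -i -X POST "http://localhost:15000/api/instance/suspend/process/SampleProcess?user.name=<USER>&colo=*&start=2012-04-03T07:00Z&end=2014-04-03T07:00Z"
</verbatim>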

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/MetadataList.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/MetadataList.twiki b/docs/src/site/twiki/restapi/MetadataList.twiki
deleted file mode 100644
index 98abf46..0000000
--- a/docs/src/site/twiki/restapi/MetadataList.twiki
+++ /dev/null
@@ -1,31 +0,0 @@
----++  GET api/metadata/discovery/:type/list
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get all dimensions of specified type.
-
----++ Parameters
-   * :type Valid dimension types are cluster_entity, feed_entity, process_entity, user, colo, tags, groups, pipelines.
-   * cluster <optional query param> Show dimensions related to this cluster.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
-
----++ Results
-List of dimensions that match requested type [and cluster].
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/discovery/process_entity/list?cluster=primary-cluster&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results": ["sampleIngestProcess","testProcess","anotherProcess"],
-    "totalSize": 3
-}
-</verbatim>
-
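
As a further sketch, any of the other dimension types listed above can be substituted in the path. For example, assuming simple (Pseudo) authentication and the same host and port as the example, the tags dimension could be listed with:

<verbatim>
curl -i "http://localhost:15000/api/metadata/discovery/tags/list?user.name=<USER>"
</verbatim>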

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/MetadataRelations.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/MetadataRelations.twiki b/docs/src/site/twiki/restapi/MetadataRelations.twiki
deleted file mode 100644
index b29fd2a..0000000
--- a/docs/src/site/twiki/restapi/MetadataRelations.twiki
+++ /dev/null
@@ -1,46 +0,0 @@
----++  GET api/metadata/discovery/:dimension-type/:dimension-name/relations
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get all relations of a specific dimension.
-
----++ Parameters
-   * :type Valid dimension types are cluster_entity, feed_entity, process_entity, user, colo, tags, groups, pipelines.
-   * :name Name of the dimension.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Get all relations of a specific dimension.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/discovery/process_entity/sample-process/relations?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "timestamp":"2014-09-09T01:31Z",
-    "userWorkflowEngine":"pig",
-    "name":"sample-process",
-    "type":"PROCESS_ENTITY",
-    "userWorkflowName":"imp-click-join-workflow",
-    "version":"1.0.9",
-    "inVertices":[
-        {"name":"clicks-feed","type":"FEED_ENTITY","label":"input"},
-        {"name":"impression-feed","type":"FEED_ENTITY","label":"input"},
-        {"name":"sample-process\/2014-01-01T01:00Z","type":"PROCESS_INSTANCE","label":"instance-of"}
-    ],
-    "outVertices":[
-        {"name":"Critical","type":"TAGS","label":"classified-as"},
-        {"name":"testPipeline","type":"PIPELINES","label":"pipeline"},
-        {"name":"primary-cluster","type":"CLUSTER_ENTITY","label":"runs-on"},
-        {"name":"imp-click-join2","type":"FEED_ENTITY","label":"output"},
-        {"name":"imp-click-join1","type":"FEED_ENTITY","label":"output"},
-        {"name":"falcon-user","type":"USER","label":"owned-by"}
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/ResourceList.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/ResourceList.twiki b/docs/src/site/twiki/restapi/ResourceList.twiki
deleted file mode 100644
index 34c2c6f..0000000
--- a/docs/src/site/twiki/restapi/ResourceList.twiki
+++ /dev/null
@@ -1,93 +0,0 @@
----+ RESTful Resources
-
----++ Resource List
-   * <a href="#REST_Call_on_Entity_Resource">REST Call on Entity Resource</a>
-   * <a href="#REST_Call_on_Feed_and_Process_Instances">REST Call on Feed/Process Instances</a>
-   * <a href="#REST_Call_on_Admin_Resource">REST Call on Admin Resource</a>
-   * <a href="#REST_Call_on_Lineage_Graph">REST Call on Lineage Graph Resource</a>
-   * <a href="#REST_Call_on_Metadata_Resource">REST Call on Metadata Resource</a>
-
----++ Authentication
-
-When security is off (Pseudo/Simple), the authenticated user is the username specified in the user.name query
-parameter. If the user.name parameter is not set, the server may either set the authenticated user to a default web
-user, if there is any, or return an error response.
-
-When security is on (kerberos), authentication is performed by Kerberos SPNEGO.
-
-Below are examples using the curl command tool.
-
-Authentication when security is off (Pseudo/Simple):
-<verbatim>
-curl -i "http://<HOST>:<PORT>/<PATH>?[user.name=<USER>&]<PARAM>=..."
-</verbatim>
-
-Authentication using Kerberos SPNEGO when security is on:
-<verbatim>
-curl -i --negotiate -u : "http://<HOST>:<PORT>/<PATH>?<PARAM>=..."
-</verbatim>
-
-See also: [[../Security.twiki][Security in Falcon]]
-
-The current version of the REST API documentation is also hosted on the Falcon server and the Prism server (in distributed mode) at the URL http://<HOST>:<PORT>/docs
-
----++ REST Call on Admin Resource
-
-| *Call Type* | *Resource*                                     | *Description*                               |
-| GET         | [[AdminStack][api/admin/stack]]                | Get stack of the server                     |
-| GET         | [[AdminVersion][api/admin/version]]            | Get version of the server                   |
-| GET         | [[AdminConfig][api/admin/config/:config-type]] | Get configuration information of the server |
-
----++ REST Call on Entity Resource
-
-| *Call Type* | *Resource*                                                                  | *Description*                      |
-| POST        | [[EntityValidate][api/entities/validate/:entity-type]]                      | Validate the entity                |
-| POST        | [[EntitySubmit][api/entities/submit/:entity-type]]                          | Submit the entity                  |
-| POST        | [[EntityUpdate][api/entities/update/:entity-type/:entity-name]]             | Update the entity                  |
-| POST        | [[EntitySubmitAndSchedule][api/entities/submitAndSchedule/:entity-type]]    | Submit & Schedule the entity       |
-| POST        | [[EntitySchedule][api/entities/schedule/:entity-type/:entity-name]]         | Schedule the entity                |
-| POST        | [[EntitySuspend][api/entities/suspend/:entity-type/:entity-name]]           | Suspend the entity                 |
-| POST        | [[EntityResume][api/entities/resume/:entity-type/:entity-name]]             | Resume the entity                  |
-| DELETE      | [[EntityDelete][api/entities/delete/:entity-type/:entity-name]]             | Delete the entity                  |
-| GET         | [[EntityStatus][api/entities/status/:entity-type/:entity-name]]             | Get the status of the entity       |
-| GET         | [[EntityDefinition][api/entities/definition/:entity-type/:entity-name]]     | Get the definition of the entity   |
-| GET         | [[EntityList][api/entities/list/:entity-type]]                              | Get the list of entities           |
-| GET         | [[EntitySummary][api/entities/summary/:entity-type/:cluster]]               | Get instance summary of all entities |
-| GET         | [[EntityDependencies][api/entities/dependencies/:entity-type/:entity-name]] | Get the dependencies of the entity |
-| GET         | [[FeedSLA][api/entities/sla-alert/:entity-type]]                            | Get pending feed instances which missed sla |
-| GET         | [[FeedLookup][api/entities/lookup/feed/]]                                   | Get feed for given path            |
-
----++ REST Call on Feed and Process Instances
-
-| *Call Type* | *Resource*                                                                  | *Description*                |
-| GET         | [[InstanceRunning][api/instance/running/:entity-type/:entity-name]]         | List of running instances.   |
-| GET         | [[InstanceParams][api/instance/params/:entity-type/:entity-name]]           | List of entity instances along with their workflow params.   |
-| GET         | [[InstanceList][api/instance/list/:entity-type/:entity-name]]               | List of instances   |
-| GET         | [[InstanceStatus][api/instance/status/:entity-type/:entity-name]]           | Status of a given instance   |
-| POST        | [[InstanceKill][api/instance/kill/:entity-type/:entity-name]]               | Kill a given instance        |
-| POST        | [[InstanceSuspend][api/instance/suspend/:entity-type/:entity-name]]         | Suspend a running instance   |
-| POST        | [[InstanceResume][api/instance/resume/:entity-type/:entity-name]]           | Resume a given instance      |
-| POST        | [[InstanceRerun][api/instance/rerun/:entity-type/:entity-name]]             | Rerun a given instance       |
-| GET         | [[InstanceLogs][api/instance/logs/:entity-type/:entity-name]]               | Get logs of a given instance |
-| GET         | [[Triage][api/instance/triage/:entity-type/:entity-name]]                   | Triage an instance to see its stuck lineage |
-| GET         | [[InstanceSummary][api/instance/summary/:entity-type/:entity-name]]         | Return summary of instances for an entity |
-| GET         | [[InstanceDependency][api/instance/dependencies/:entity-type/:entity-name]] | Return dependent instances for a given instance |
-
----++ REST Call on Metadata Lineage Resource
-
-| *Call Type* | *Resource*                                                                             | *Description*                                                                 |
-| GET         | [[Graph][api/metadata/lineage/serialize]]                                              | dump the graph                                                                |
-| GET         | [[AllVertices][api/metadata/lineage/vertices/all]]                                     | get all vertices                                                              |
-| GET         | [[Vertices][api/metadata/lineage/vertices?key=:key&value=:value]]                      | get all vertices for a key index                                              |
-| GET         | [[Vertex][api/metadata/lineage/vertices/:id]]                                          | get the vertex with the specified id                                          |
-| GET         | [[VertexProperties][api/metadata/lineage/vertices/properties/:id?relationships=:true]] | get the properties of the vertex with the specified id                        |
-| GET         | [[AdjacentVertices][api/metadata/lineage/vertices/:id/:direction]]                     | get the adjacent vertices or edges of the vertex with the specified direction |
-| GET         | [[AllEdges][api/metadata/lineage/edges/all]]                                           | get all edges                                                                 |
-| GET         | [[Edge][api/metadata/lineage/edges/:id]]                                               | get the edge with the specified id                                            |
-| GET         | [[EntityLineage][api/metadata/lineage/entities?pipeline=:name]]                        | Get lineage graph for processes and feeds in the specified pipeline           |
-
----++ REST Call on Metadata Discovery Resource
-
-| *Call Type* | *Resource*                                                                                     | *Description*                                                                 |
-| GET         | [[MetadataList][api/metadata/discovery/:dimension-type/list]]                                  | List of dimensions  |
-| GET         | [[MetadataRelations][api/metadata/discovery/:dimension-type/:dimension-name/relations]]        | Return all relations of a dimension |
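
To make the curl templates above concrete, here is one possible check of the server version using the Admin resource listed earlier, under simple (Pseudo) authentication; host, port and user are placeholders:

<verbatim>
curl -i "http://<HOST>:<PORT>/api/admin/version?user.name=<USER>"
</verbatim>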

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/Triage.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/Triage.twiki b/docs/src/site/twiki/restapi/Triage.twiki
deleted file mode 100644
index 9ff95c8..0000000
--- a/docs/src/site/twiki/restapi/Triage.twiki
+++ /dev/null
@@ -1,45 +0,0 @@
----++  GET api/instance/triage/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Given a feed/process instance, this command traces its ancestors to find which of them have failed. It is useful when
-many instances in a pipeline are failing, as it helps find the root cause of the pipeline being stuck.
-
-
----++ Parameters
-   * :entity-type type of entity(feed/process).
-   * :entity-name name of the feed/process.
-   * :start instance time of the entity instance.
-   * :colo <optional param> name of the colo on which you want to triage
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-It returns a JSON graph.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/triage/feed/my-feed?start=2015-03-02T00:00Z&colo=local&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "vertices": ["(FEED) my-feed (2015-03-02T00:00Z) [Unavailable]", "(PROCESS) producer-process (2015-03-01T10:00Z) [TIMEDOUT]", "(FEED) input-feed-for-producer (2015-03-01T00:00Z) [Available]"],
-    "edges":
-    [
-        {
-         "from"  : "(PROCESS) producer-process (2015-03-01T10:00Z) [TIMEDOUT]",
-         "to"    : "(FEED) my-feed (2015-03-02T00:00Z) [Unavailable]",
-         "label" : "produces"
-        },
-        {
-         "from"  : "(FEED) input-feed-for-producer (2015-03-01T00:00Z) [Available]",
-         "to"    : "(PROCESS) producer-process (2015-03-01T10:00Z) [TIMEDOUT]",
-         "label" : "consumed by"
-        }
-    ]
-}
-</verbatim>
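
For illustration, the same triage request could be issued with curl; the command below assumes simple (Pseudo) authentication and reuses the feed name, instance time and colo from the example above, with the user name as a placeholder:

<verbatim>
curl -i "http://localhost:15000/api/instance/triage/feed/my-feed?user.name=<USER>&start=2015-03-02T00:00Z&colo=local"
</verbatim>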

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/Vertex.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/Vertex.twiki b/docs/src/site/twiki/restapi/Vertex.twiki
deleted file mode 100644
index 82f5bfb..0000000
--- a/docs/src/site/twiki/restapi/Vertex.twiki
+++ /dev/null
@@ -1,36 +0,0 @@
----++  GET api/metadata/lineage/vertices/:id
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Gets the vertex with specified id.
-
----++ Parameters
-   * :id is the unique id of the vertex.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Vertex with the specified id.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices/4?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results": [
-        {
-            "timestamp":"2014-04-21T20:55Z",
-            "name":"sampleIngestProcess",
-            "type":"process-instance",
-            "version":"2.0.0",
-            "_id":4,
-            "_type":"vertex"
-        }
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/VertexProperties.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/VertexProperties.twiki b/docs/src/site/twiki/restapi/VertexProperties.twiki
deleted file mode 100644
index 11c64b5..0000000
--- a/docs/src/site/twiki/restapi/VertexProperties.twiki
+++ /dev/null
@@ -1,34 +0,0 @@
----++  GET api/metadata/lineage/vertices/properties/:id?relationships=:true
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Gets the properties of the vertex with specified id.
-
----++ Parameters
-   * :id is the unique id of the vertex.
-   * :relationships has default value of false. Pass true if relationships should be fetched.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
- Properties associated with the specified vertex.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices/properties/40004?relationships=true&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results":
-        {
-            "timestamp":"2014-04-25T22:20Z",
-            "name":"local",
-            "type":"cluster-entity"
-        },
-    "totalSize":3
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/Vertices.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/Vertices.twiki b/docs/src/site/twiki/restapi/Vertices.twiki
deleted file mode 100644
index 643e6e9..0000000
--- a/docs/src/site/twiki/restapi/Vertices.twiki
+++ /dev/null
@@ -1,38 +0,0 @@
----++  GET api/metadata/lineage/vertices?key=:key&value=:value
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get all vertices for a key index given the specified value.
-
----++ Parameters
-   * :key is the key to be matched.
-   * :value is the associated value of the key.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-All vertices matching given property key and a value.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices?key=name&value=sampleIngestProcess&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results": [
-        {
-            "timestamp":"2014-04-21T20:55Z",
-            "name":"sampleIngestProcess",
-            "type":"process-instance",
-            "version":"2.0.0",
-            "_id":4,
-            "_type":"vertex"
-        }
-    ],
-    "totalSize": 1
-}
-</verbatim>
\ No newline at end of file
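
As a final sketch, the key/value vertex query above can be issued with curl in the same way; the Kerberos (SPNEGO) variant is shown here, with the host and port reused from the example as placeholders:

<verbatim>
curl -i --negotiate -u : "http://localhost:15000/api/metadata/lineage/vertices?key=name&value=sampleIngestProcess"
</verbatim>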

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/.gitignore
----------------------------------------------------------------------
diff --git a/falcon-regression/.gitignore b/falcon-regression/.gitignore
deleted file mode 100644
index 0cb27d0..0000000
--- a/falcon-regression/.gitignore
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Maven
-target
-
-# IntelliJ
-*.iml
-*.ipr
-*.iws
-.idea
-
-# Eclipse
-.classpath
-.project
-.settings
-.externalToolBuilders
-maven-eclipse.xml
-
-#ActiveMQ
-activemq-data
-build
-
-#log files
-logs
-
-test-output/
-bin/
-
-#Mac OS X
-.DS_Store
-*/DS_Store
-
-#hadoop-conf
-merlin/src/test/resources/hadoop-conf
-merlin/src/test/resources/hive-conf
-merlin/src/test/resources/falcon-conf
-
-#prop files
-merlin/src/main/resources/Merlin.properties
-Merlin.properties
-
-#emacs
-*~
-[#]*[#]
-.\#*

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/CHANGES.txt
----------------------------------------------------------------------
diff --git a/falcon-regression/CHANGES.txt b/falcon-regression/CHANGES.txt
deleted file mode 100644
index e3f7264..0000000
--- a/falcon-regression/CHANGES.txt
+++ /dev/null
@@ -1,598 +0,0 @@
-Apache Falcon Regression Change log
-
-Trunk (Unreleased)
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-   FALCON-1566 Add test for SLA monitoring API (Pragya Mittal)
-
-   FALCON-1567 Test case for Lifecycle feature  (Pragya Mittal)
-
-   FALCON-1784 Add regression test for FALCON-1647 (Paul Isaychuk)
-
-   FALCON-1829 Add regression for submit and schedule process on native scheduler (time based) (Pragya Mittal)
-
-   FALCON-1766 Add CLI metrics check for HiveDR, HDFS and feed replication (Paul Isaychuk)
-
-   FALCON-1777 Add regression for HDFS replication (recipe) (Paul Isaychuk)
-
-   FALCON-1699 Test fixes for RetentionTest, LineageApiTest, TouchAPIPrismAndServerTest, FeedReplicationTest and few fortifications(Paul Isaychuk via Pragya Mittal)
-
-   FALCON-1698 New tests for ProcessSetupTest, ClusterSetupTest, UI test fixes(Paul Isaychuk via Ajay Yadava)
-
-   FALCON-1700 Add new test cases to HiveDRTest(Paul Isaychuk & Murali Ramasami via Ajay Yadava)
-
-   FALCON-1689 NoOutputProcessTest fails due to scheme missing in workflow.xml(Pragya Mittal)
-
-   FALCON-1377 Add tests in Falcon for the Triage API(Karishma Gulati via Pragya Mittal)
-
-   FALCON-1546 Add ProcessUpdateTest, PipelineInstanceDependencyTest and other tests and test fixes
-    (Raghav Gautam and Paul Isaychuk via Paul Isaychuk)
-
-   FALCON-1387 Add Instance Dependency API Test(Pragya Mittal via Ajay Yadava)
-
-   FALCON-1382 Add a test for feed retention to make sure that data directory is not deleted (Paul Isaychuk)
-
-   FALCON-1321 Add Entity Lineage Test (Pragya Mittal via Ajay Yadava)
-
-   FALCON-1319 Contribute HiveDr, Mirror tests and some test fixes (Namit Maheshwari, Paul Isaychuk,
-   Raghav Kumar Gautam & Ruslan Ostafiychuk via Raghav Kumar Gautam)
-
-   FALCON-1254 ClusterSetup UI: edit xml test cases, stabilize 2 tests (Paul Isaychuk via Ruslan Ostafiychuk)
-   
-   FALCON-1215 Adding new test cases related to rerun feature (Pragya M via Samarth Gupta)
-
-   FALCON-1249 Tests for process setup wizard (Namit Maheshwari and Paul Isaychuk)
-
-   FALCON-1242 Search UI test for entity upload button (Namit Maheshwari)
-
-   FALCON-1222 Feed Wizard multiple tests (Namit Maheshwari)
-
-   FALCON-1229 Tests for instance page on SearchUI (Ruslan Ostafiychuk)
-
-   FALCON-1216 Cluster setup wizard multiple tests (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1202 Add tests for EntityPage (Raghav Kumar Gautam)
-
-   FALCON-1210 Process Setup tests for testing header and general step default scenario (Raghav Kumar Gautam)
-
-   FALCON-1201 Feed Setup tests for testing header and default scenario (Namit Maheshwari)
-
-   FALCON-1198 Test buttons available on search results page (Ruslan Ostafiychuk)
-
-   FALCON-1187 Test that changes made via API are reflected on UI (Ruslan Ostafiychuk
-   via Raghav Kumar Gautam)
-
-   FALCON-1173 Tests for entities table of search-ui (Ruslan Ostafiychuk)
-
-   FALCON-1171 Adding search API tests (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-1167 Homepage & Login test for search-ui (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1067 Add test in falcon-regression for feed instance listing api(Karishma G 
-   via Samarth G) 
- 
-   FALCON-1066 Add test in falcon to test process SLA feature(Pragya M via Samarth G)
- 
-   FALCON-964 add test in falcon regression to test loading of jar present in user lib(Pragya M 
-   via Samarth Gupta)
-   
-   FALCON-974 add test in falcon-regression for log mover feature(Pragya via Samarth Gupta)
-   
-   FALCON-843 add test to support current & last week el expression(Pragya M via Samarth G)
-
-   FALCON-1035 Add test in falcon regression for validate feature. Validate is exposed 
-   via both server and prism (Karishma G via Samarth Gupta)
-
-   FALCON-1030 Test for Add option to search for Entities(Pragya Mittal Via Samarth Gupta)
-
-   FALCON-1043 Add test in falcon to test touch feature(Pragya Mittal via Samarth Gupta)   
-
-   FALCON-1021 Add test in falcon to test feed SLA feature (Pragya Mittal via Raghav Kumar Gautam)
-
-   FALCON-671 Add a test in falcon-regression where one oozie workflow has various actions like
-   mr, hive, pig (Karishma Gulati via Ruslan Ostafiychuk)
-
-   FALCON-985 Upgrading jsch version in falcon regression pom (Pragya M via Samarth)
-
-   FALCON-884 Add option to dump xmls generated by falcon (Raghav Kumar Gautam via Ruslan
-   Ostafiychuk)
-
-   FALCON-893 Add tests for replication to wasb filesystem
-   (Raghav Kumar Gautam and Ruslan Ostafiychuk via Arpit Gupta)
-
-   FALCON-861 Add ACL tests for falcon client and ACL update
-   (Raghav Kumar Gautam)
-
-   FALCON-844 List instances tests (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-841 Test falcon process with different frequencies
-   (Raghav Kumar Gautam)
-
-   FALCON-814 Tests for entities summary API (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-833 FeedReplicationTest.availabilityFlagTest fix (Paul Isaychuk
-   via Raghav Kumar Gautam)
-
-   FALCON-838 Add support for https in merlin (Raghav Kumar Gautam and
-   Ruslan Ostafiychuk via Raghav Kumar Gautam)
-
-   FALCON-746 Add ACL validation and enforcement tests (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-743 Adding tests for cases related to usage of pipelines tag
-   (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-589 Add test cases for various feed operations on Hcat feeds (Karishma G 
-   via Samarth Gupta)
-
-  IMPROVEMENTS
-   FALCON-1819 Improve test class entity cleanup logic (Paul Isaychuk)
-
-   FALCON-1697 Stabilization of scenarios which are based on instances lifecycle(Paul Isaychuk via Ajay Yadava)
-
-   FALCON-1543 Upgrade Falcon regression to use Active MQ 5.12(Pragya Mittal via Pallavi Rao)
-
-   FALCON-1502 Checkstyle failures in Falcon Regression(Pragya Mittal via Ajay Yadava)
-
-   FALCON-1384 AbstractEntityHelper.java cleanup (Paul Isaychuk)
-
-   FALCON-1306 Custom window dimensions for UI tests (Ruslan Ostafiychuk)
-
-   FALCON-1284 Fix entity cleanup when is_depracate=true (Ruslan Ostafiychuk via Paul Isaychuk)
-
-   FALCON-1283 Save screenshots to log.capture.location (Ruslan Ostafiychuk via Paul Isaychuk)
-
-   FALCON-1259 Fix property picked for DIFFERENT_USER_GROUP (Raghav Kumar Gautam)
-
-   FALCON-1261 ProcessSetupTest, EntitiesPatternSearchTest stabilization (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-1256 ListProcessInstancesTest needs to be stabilized (Raghav Kumar Gautam)
-
-   FALCON-1227 Add logMover check in FeedReplication test(Pragya M via Samarth G)
-
-   FALCON-1253 Fortify ExternalFSTest (Ruslan Ostafiychuk)
-
-   FALCON-1243 Feed test update as per new UI changes (Namit Maheshwari)
-
-   FALCON-1241 Fix SearchApiTest according to changes in API, tag ClusterSetupTest,
-   PrismProcessScheduleTest#testScheduleDeletedProcessOnBothColos (Paul Isaychuk)
-
-   FALCON-1226 Stabilize testActionsPauseResume() (Ruslan Ostafiychuk)
-
-   FALCON-1225 remove text line containing "Copyright" from regression 
-   log4j.properties(Samarth Gupta)
-
-   FALCON-1197 Use diamond operator in merlin and merlin-core (Ruslan Ostafiychuk)
-
-   FALCON-1131 Fixing FeedClusterUpdateTest and name confilcts in FALCON-1113(Pragya M 
-   via Samarth Gupta) 
-
-   FALCON-1145 Changing entity name if it exceeds defined length(Pragya M via Samarth G)
-
-   FALCON-1151 Migrate oozie related methods from InstanceUtil.java to OozieUtil.java
-   (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1138: JDK requirement for merlin should be 1.7 (Raghav Kumar Gautam)
-
-   FALCON-1135 Migrate methods related to *Merlin.java classes from InstanceUtil.java and
-   Bundle.java (Ruslan Ostafiychuk)
-
-   FALCON-1088 Fixing FeedDelayParallelTimeoutTest and renaming it to FeedDelayTest(Pragya M via 
-   Samarth G)
-
-   FALCON-1112 Migrate methods related to *Merlin.java classes from Util.java to their respective
-    *Merlin.java (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1113 Clean up data files in merlin resource directory. Create better names for them
-   (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1103 RetentionTest stabilization - remove check of all retention job actions
-   (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1010 Add documentation in falcon-regression's README.md, making it easy for 
-   contributors to add tests to falcon-regression(Karishma G via Samarth )
- 
-   FALCON-1011 Fix PrismProcessScheduleTest in falcon regression ( Pragya M via 
-   Samarth G)
-
-   FALCON-1075 Fixing ProcessFrequencyTest, EntityDryRunTest, ProcessInstanceRerunTest 
-   which are failing as part of falcon regression (Pragya M via Samarth G)
-
-   FALCON-1094 getAllFilesRecursivelyHDFS without recursive call (Ruslan Ostafiychuk)
-
-   FALCON-1093 Tag all new tests added to falcon-regression (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1089 ProcessInstanceStatusTest improvement (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1058 Test for Feed Replication with Empty Directories(Pragya M via Samarth Gupta)
-
-   FALCON-1046 Add test for process update with user feature(Karishma G via Samarth Gupta)
-   
-   FALCON-1017 FeedReplicationTest modified to check for _SUCCESS getting created on 
-   target directory(Pragya M via Samarth G)   
-   
-   FALCON-1040 Modifying ProcessInstanceStatusTest to expose job id for running jobs in 
-   Falcon. (Pragya M via Samarth G)
-
-   FALCON-1044 Add tests for the change that start and end are compulsory parameters for 
-   all instance POST apis.(Karishma G via Samarth)
-   
-   FALCON-1034 Add test for FALCON-677 - wherein feed data and stats paths no longer default
-   to /tmp/.(Karishma G via Samarth Gupta)
-  
-   FALCON-1014 Fix PrismProcessSnSTest in falcon regression . (Contributed by Pragya M via 
-   Samarth G)
-
-   FALCON-1022 Wait for RUNNING status of instances before killing them (Ruslan Ostafiychuk)
-
-   FALCON-968 Remove only entities submitted by same test and baseHDFSDir usage refactored
-   (Ruslan Ostafiychuk via Raghav Kumar Gautam)
-
-   FALCON-1007 Improve test output and variable names (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1009 InstanceUtil cleanup (Raghav Kumar Gautam)
-
-   FALCON-1006 Add a property for inclusion of test (Raghav Kumar Gautam)
-
-   FALCON-999 Fix PrismProcessResumeTest in falcon regression (Samarth Gupta)
-   
-   FALCON-998 Removing 'store' hard coded in regression test (Pragya M via Samarth Gupta)
-
-   FALCON-989 add timeout to EmbeddedPigScriptTest in falcon regression (Samarth Gupta)
-
-   FALCON-978 Refactoring FeedLateRerunTest to be parametrized test (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-979 Fix checkstyle issues introduced by falcon-976 (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-976 EntityDryRunTest failing in falcon trunk (Pragya M via Samarth Gupta)
-
-   FALCON-942 Cleanup BaseTestClass and setUp/tearDown methods (Ruslan Ostafiychuk)
-
-   FALCON-962 Fortify ListFeedInstancesTest (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-963 Add option to exclude tests in falcon-regression (Ruslan Ostafiychuk)
-
-   FALCON-953 Change wasb replication to run with single cluster (Ruslan Ostafiychuk
-   via Raghav Kumar Gautam)
-
-   FALCON-947 Ignore hadoop configs in merlin/src/test/resources/hadoop-conf (Ruslan Ostafiychuk)
-
-   FALCON-940 Avoid NPE in getAllEntitiesOfOneType() for zero elements
-   (Ruslan Ostafiychuk via Raghav Kumar Gautam)
-
-   FALCON-908 Remove jars that are not needed (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-939 Fixing few typos and removing unused stuff (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-930 Delete old project name (ivory) from code and xmls (Ruslan Ostafiychuk via Raghav
-   Kumar Gautam)
-
-   FALCON-927 Refactoring of entity helpers in falcon-regression (Ruslan Ostafiychuk)
-
-   FALCON-928 Use falcon's checkstyle for falcon regression code (Raghav Kumar Gautam)
-
-   FALCON-909 Remove names of the contributors from xmls and code (Ruslan Ostafiychuk)
-
-   FALCON-926 Fix problems found by findbugs in merlin and merlin-core (Ruslan Ostafiychuk and
-   Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-698 Fix checkstyle bugs in test files in falcon-regression (Ruslan Ostafiychuk and
-   Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-684 Fix problems found by checkstyle in non-test files in falcon-regression (Ruslan
-   Ostafiychuk)
-
-   FALCON-923 Remove unused xsd's (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-699 Use APIResult and other support classes from falcon-client in falcon-regression
-   (Ruslan Ostafiychuk)
-
-   FALCON-906: Add license header where it is needed, remove deprecated inmobi
-   references (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-890 Merlin tests need to be updated in secure cluster to get the
-   token using the ugi object requests (Arpit Gupta)
-
-   FALCON-863 Fix tests to have start as well as end time for kill, rerun etc
-   requests (Raghav Kumar Gautam)
-
-   FALCON-856 Handle JsonSyntaxException in hitUrl (Ruslan Ostafiychuk)
-
-   FALCON-847 Issue with login from keytab in merlin (Raghav Kumar Gautam)
-
-   FALCON-835 Tag authorization tests (Raghav Kumar Gautam)
-
-   FALCON-826 unify staging and working dir requirements (Raghav Kumar Gautam via Ruslan
-   Ostafiychuk)
-
-   FALCON-811 falcon superuser should be used instead of admin (Raghav Kumar Gautam via Ruslan
-   Ostafiychuk)
-
-   FALCON-812 all the entities must have acl (Raghav Kumar Gautam)
-
-   FALCON-801 Increasing time to wait for instance (Paul Isaychuk)
-
-   FALCON-809 Add message from response if assertSucceeded fails (Ruslan Ostafiychuk)
-
-   FALCON-802 Authorization test disabled (Paul Isaychuk)
-
-   FALCON-706 Parse json with instances to InstancesResult using joda's DateTime (Ruslan
-   Ostafiychuk)
-
-   FALCON-798 optionalTest_updateProcessMakeOptionalCompulsory fixed (Paul
-   Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-631 add late data regression test for feed and process(Pragya
-   via Samarth Gupta)
-
-   FALCON-701 HadoopUtil and Util classes documented (Paul Isaychuk via
-   Raghav Kumar Gautam)
-
-   FALCON-750 Method name fixed (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-645 add test to falcon regression for change made in default behaviour 
-   of instance request ( Karishma Via Samarth Gupta)
-
-   FALCON-745 LogUtil requires fine control over what to dump (Raghav Kumar
-   Gautam)
-
-   FALCON-739 JmsMessageConsumer should start daemon threads (Raghav Kumar
-   Gautam)
-
-   FALCON-729 HCatFeedOperationsTest should not initialize hcat client during
-   var declaration (Raghav Kumar Gautam)
-
-   FALCON-726 Expect less fields in messages in falcon-regression
-   (Ruslan Ostafiychuk)
-
-   FALCON-720 Fix ProcessInstanceStatusTest as per FALCON-166
-   (Raghav Kumar Gautam)
-
-   FALCON-693 Tests with zero inp/outp fixed to use correct workflow, javadocs
-   added (Paul Isaychuk)
-
-   FALCON-711 Test retagged (Paul Isaychuk)
-
-   FALCON-696 Pull oozie logs at the end of tests (Raghav Kumar Gautam)
-
-   FALCON-675 Request URLS moved from parameters into methods in falcon-regression (Ruslan
-   Ostafiychuk)
-
-   FALCON-656 add test in falcon regression's Authorization test where non-feed owner updates
-   a feed with a dependent process(Karishma via Samarth Gupta)
-
-   FALCON-674 General code factored out for ProcessInstance* tests (Paul Isaychuk via Ruslan
-   Ostafiychuk)
-
-   FALCON-657 String datePattern moved to BaseTestClass (Ruslan Ostafiychuk)
-
-   FALCON-643 Tests with zero-output/input scenario amended to match test case (Paul Isaychuk via
-   Ruslan Ostafiychuk)
-
-   FALCON-660 7 test classes refactored and few of them documented (Paul Isaychuk via
-   Ruslan Ostafiychuk)
-
-   FALCON-653 Add falcon regression test for zero input process(Karishma via Samarth Gupta)
-   FALCON-655 Skip workflow upload if process won't be submitted (Ruslan Ostafiychuk)
-
-   FALCON-587 Don't delete input data in @AfterClass in falcon-regression tests if
-   clean_test_dir=false (Ruslan Ostafiychuk)
-
-   FALCON-646 Refactoring, documentation stuff (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-572 HadoopUtil cleanup in falcon-regression (Ruslan Ostafiychuk via Samarth Gupta)
-   FALCON-632 Refactoring, documentation stuff (Paul Isaychuk via Samarth Gupta)
-
-   FALCON-609 UpdateAtSpecificTimeTest, InstanceSummaryTest tagged, fixed, refactored
-   (Paul Isaychuk via Samarth Gupta)
-
-   FALCON-619 ELExp_FutureAndLatestTest stabilization (Paul Isaychuk via Arpit Gupta)
-
-   FALCON-610 Refactoring and documentation updates (Paul Isaychuk via Arpit Gupta)
-
-   FALCON-581 Refactor code for cross product and make it a method 
-   (Raghav Kumar Gautam via Arpit Gupta) 
-   
-   FALCON-597 String logged at the start and end of the merlin test are slightly 
-   mismatched (Raghav Kumar Gautam via Arpit Gupta)
-
-  OPTIMIZATIONS
-   FALCON-681 delete duplicate feed retention test from falcon regression (SamarthG)
-
-  BUG FIXES
-   FALCON-1783 Fix ProcessUpdateTest and SearchApiTest to use prism (Paul Isaychuk)
-
-   FALCON-1816 Fix findbugs-exclude.xml path and hadoop version in falcon-regression pom (Paul Isaychuk via Ajay Yadava)
-
-   FALCON-1701 HiveDr, ClusterSetupTest, MirrorSummaryTest fixes(Murali Ramasami via Ajay Yadava)
-
-   FALCON-1489 Partial status http response code returns 200(Pragya Mittal via Ajay Yadava)
-
-   FALCON-1388 Fix merge conflicts produced by FALCON-1002 (Paul Isaychuk)
-
-   FALCON-1002 Disable UpdateAtSpecific time test from falcon regression(Samarth Gupta via Ajay Yadava)
-
-   FALCON-1376 Fixing FeedDelayTest(Pragya Mittal via Ajay Yadava)
-
-   FALCON-1332 Fortify ClusterSetupTest#testRegistryInterface - if checkbox wasn't clicked repeat an attempt
-   (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-1338 Fortify ProcessInstanceRerunTest#testProcessInstanceRerunMultipleSucceeded (Paul Isaychuk)
-
-   FALCON-1318 Method waitForAngularToFinish() should be robust to unresponsive browser (Raghav Kumar Gautam)
-
-   FALCON-1314 Unify handling of local files and directory (Raghav Kumar Gautam)
-
-   FALCON-1278 Submitted entity properties must have unique names (Raghav Kumar Gautam)
-
-   FALCON-1300 Use xml instead of properties for log4j (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1290 Push Enter in start/end filter on entity page, start instance is included (Ruslan Ostafiychuk)
-
-   FALCON-1289 Select instances again after performing action on entity page (Ruslan Ostafiychuk)
-
-   FALCON-1292 Avoid usage of hard coded feed names in Bundle class (Paul Isaychuk)
-
-   FALCON-1281 Fix InstancePageTest#testInstancePageStatusWaitingRunning (Namit Maheshwari via Ruslan Ostafiychuk)
-
-   FALCON-1287 Use listAllEntities() instead of listEntities() (Ruslan Ostafiychuk via Paul Isaychuk)
-
-   FALCON-1288 Fortify expressionLanguageTest (Ruslan Ostafiychuk via Paul Isaychuk)
-
-   FALCON-1285 Don't reduce process name in ProcessSetupTest (Ruslan Ostafiychuk)
-
-   FALCON-1277 Fix SearchApiTest according to api changes (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1275 PrismFeedUpdateTest is using root directory (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1274 PrismProcessScheduleTest is using root directory (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1273 ProcessLateRerunTest still using root directory (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1272 InstanceParamTest still using root directory (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1269 Update InstancePageTest and other after UI changes (Ruslan Ostafiychuk)
-
-   FALCON-1270 SearchUI. Login screen changed to not have password field (Namit Maheshwari via Ruslan Ostafiychuk)
-
-   FALCON-1266 LineageApiTest test fixes (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1265 ListProcessInstancesTest test fixes (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-1267 Fix Processsetuptest#testinoutstepdropdownfeeds (Namit Maheshwari via Raghav Kumar Gautam)
-
-   FALCON-1264 Fix validity parsing in ProcessWizardPage (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1263 Fix waitTillInstanceReachState for EntityType.PROCESS (Ruslan Ostafiychuk)
-
-   FALCON-1258 Fix feed validity and fortify ELExpFutureAndLatestTest (Ruslan Ostafiychuk)
-
-   FALCON-1257 Fix feedAclUpdate and processAclUpdate (Ruslan Ostafiychuk)
-
-   FALCON-1255 Fix list of expected statuses in drop down on Entity Page (Ruslan Ostafiychuk)
-
-   FALCON-1246 Remove wrong parameter from update signature, delete related tests (Paul Isaychuk)
-
-   FALCON-1221 RescheduleProcessInFinalStatesTest.checkNotFoundDefinition() needs to be case flexible
-   (Raghav Kumar Gautam)
-
-   FALCON-1218 Minor fix for testFeedListingAfterFeedDataPathUpdate & testFeedListingAfterFeedAvailabilityFlagUpdate
-   (Raghav Kumar Gautam)
-
-   FALCON-1183 Fix expected response in testScheduleDeletedFeedOnBothColosUsingColoHelper
-   (Ruslan Ostafiychuk)
-
-   FALCON-1176 Fixing EntitiesPatternSearchTest failing as a result of FALCON-1158 (Pragya Mittal
-   via Ruslan Ostafiychuk)
-
-   FALCON-1177 Changing method modifier to make it be not concerned as a test (Paul Isaychuk via
-   Ruslan Ostafiychuk)
-
-   FALCON-1166 Fixing FeedReplicationTest, TouchAPIPrismAndServerTest, ProcessLibPathLoadTest
-   (Pragya Mittal via Ruslan Ostafiychuk)
-
-   FALCON-1155 Unwanted character at the end of workflow.xml (Karishma Gulati via Ruslan Ostafiychuk)
-
-   FALCON-1072 Dumping of Oozie info should use os specific newline (Raghav Kumar Gautam)
-
-   FALCON-1018 Enable and fix FalconClientTest#badClusterSubmit (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-1005 In ProcessInstanceSuspendTest clean test dir after each method execution
-   (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-990 Fix and enable 3/4 tests of RescheduleProcessInFinalStatesTest (Ruslan Ostafiychuk)
-
-   FALCON-951 Adding wait till instance goes into running for tests in falcon trunk
-   (Pragya Mittal via Raghav Kumar Gautam)
-
-   FALCON-955 enable and fix feedAclUpdate and processAclUpdate tests (Raghav Kumar Gautam via
-   Ruslan Ostafiychuk)
-
-   FALCON-948 Enabling late rerun tests (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-956 Fix testProcessInstanceStatusTimedOut (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-937 Fix tests that are still using hdfs root dir in feeds (Raghav Kumar Gautam
-   via Ruslan Ostafiychuk)
-
-   FALCON-936 Replace deprecated WorkflowJob.getGroup() with getAcl().
-   (Ruslan Ostafiychuk via Raghav Kumar Gautam)
-
-   FALCON-895 Fix intermittent failures in ProcessInstanceStatus and KillsTest classes.
-   (Paul Isaychuk via Raghav Kumar Gautam)
-
-   FALCON-891 Removing some more invalid acl tests (Raghav Kumar Gautam)
-
-   FALCON-888 Disabling test for special chars in pipeline name and
-   RescheduleProcessInFinalStatesTest (Paul Isaychuk)
-
-   FALCON-886 Disable tests that are failing (Raghav Kumar Gautam)
-
-   FALCON-883 Fixing testFeedCustomFilter() (Paul Isaychuk)
-
-   FALCON-879 Update ACL tests according to info provided in FALCON-864
-   (Raghav Kumar Gautam)
-
-   FALCON-876 Reducing fail time for HCatProcessTest and
-   PrismFeedReplicationUpdateTest (Raghav Kumar Gautam)
-
-   FALCON-873 BeforeClass fails in falcon-regression if "hadoop.rpc.protection"
-    is not set(Samarth Gupta)
-
-   FALCON-871 Merlin clusters should have hadoop.rpc.protection property
-   (Raghav Kumar Gautam)
-
-   FALCON-865 Rest end-point entities/summary has changed (Ruslan Ostafiychuk)
-
-   FALCON-867 In process ACL tests process validity needs to be in future
-   (Raghav Kumar Gautam)
-
-   FALCON-866 Lineage endpoints need to be changed for merlin
-   (Raghav Kumar Gautam)
-
-   FALCON-855 Fix validation in ProcessInstanceKillsTest#testProcessInstanceKillStartAndEndSame
-   (Ruslan Ostafiychuk)
-
-   FALCON-860 Add label-types for labels added as part of EntitySummaryTest (Raghav Kumar Gautam
-   via Ruslan Ostafiychuk)
-
-   FALCON-852 Merlin function testProcessOrderBy() needs to use string ordering instead of enum
-   ordering (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-848 ListEntitiesTest tagged and checkstyle bugs fixed (Ruslan Ostafiychuk)
-
-   FALCON-832 more edge labels and node type for lineage (Raghav Kumar Gautam via Ruslan
-   Ostafiychuk)
-
-   FALCON-797 ProcessInstanceStatusTest#testProcessInstanceStatusEndOutOfRange fixed (Paul
-   Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-779 ProcessInstanceStatusTest#testProcessInstanceStatusReverseDateRange fixed (Paul
-   Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-776 Fixing PrismFeedUpdateTest and FeedInstanceStatus test issues (Raghav Kumar Gautam
-   via Ruslan Ostafiychuk)
-
-   FALCON-747 validateSuccessOnlyStart should use '1' as expected value (Ruslan Ostafiychuk)
-
-   FALCON-737 In NoOutputProcessTest print info before assertion (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-
-   FALCON-736 OneNonCatInputOneHCatOutput test fails because of change in parameter name (Raghav Kumar Gautam
-   via Ruslan Ostafiychuk)
-
-   FALCON-733 HCatProcess tests fail because _SUCCESS file is non-zero size (Raghav Kumar Gautam
-   via Ruslan Ostafiychuk)
-
-   FALCON-723 Fix ProcessInstanceResumeTest and ProcessInstanceSuspendTest as per FALCON-166 (Ruslan Ostafiychuk)
-
-   FALCON-715 "end" parameter should be greater than last instances startTime (Ruslan Ostafiychuk)
-
-   FALCON-716 PrismFeedSuspendTest tests retagged as distributed (Ruslan Ostafiychuk)
-
-   FALCON-713 UpdateAtSpecificTimeTest#updateTimeInPast_Feed should submitAndSchedule feed rather
-   than submit (Paul Isaychuk via Ruslan Ostafiychuk)
-
-   FALCON-707 ProcessInstanceSuspendTest is failing because input path is not set correctly
-   (Raghav Kumar Gautam via Ruslan Ostafiychuk)
-


[16/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/LineageHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/LineageHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/LineageHelper.java
deleted file mode 100644
index 7726c9e..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/LineageHelper.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers;
-
-import com.google.gson.GsonBuilder;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.regression.core.response.lineage.Direction;
-import org.apache.falcon.regression.core.response.lineage.EdgeResult;
-import org.apache.falcon.regression.core.response.lineage.EdgesResult;
-import org.apache.falcon.regression.core.response.lineage.Vertex;
-import org.apache.falcon.regression.core.response.lineage.VertexIdsResult;
-import org.apache.falcon.regression.core.response.lineage.VertexResult;
-import org.apache.falcon.regression.core.response.lineage.VerticesResult;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.GraphAssert;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.request.BaseRequest;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.http.HttpResponse;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.EnumSet;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- *  Class with helper functions to test lineage feature.
- */
-public class LineageHelper {
-    private static final Logger LOGGER = Logger.getLogger(LineageHelper.class);
-    private final String hostname;
-
-    /**
-     * Lineage related REST endpoints.
-     */
-    public enum URL {
-        SERIALIZE("/api/metadata/lineage/serialize"),
-        VERTICES("/api/metadata/lineage/vertices"),
-        VERTICES_ALL("/api/metadata/lineage/vertices/all"),
-        VERTICES_PROPERTIES("/api/metadata/lineage/vertices/properties"),
-        EDGES("/api/metadata/lineage/edges"),
-        EDGES_ALL("/api/metadata/lineage/edges/all");
-
-        private final String url;
-
-        URL(String url) {
-            this.url = url;
-        }
-
-        public String getValue() {
-            return this.url;
-        }
-    }
-
-    /**
-     * Create a LineageHelper to use with a specified hostname.
-     * @param hostname hostname
-     */
-    public LineageHelper(String hostname) {
-        this.hostname = hostname.trim().replaceAll("/$", "");
-    }
-
-    /**
-     * Create a LineageHelper to use with a specified prismHelper.
-     * @param prismHelper prismHelper
-     */
-    public LineageHelper(ColoHelper prismHelper) {
-        this(prismHelper.getClusterHelper().getHostname());
-    }
-
-    /**
-     * Extract response string from the response object.
-     * @param response the response object
-     * @return the response string
-     * @throws IOException
-     */
-    public String getResponseString(HttpResponse response) throws IOException {
-        return IOUtils.toString(response.getEntity().getContent(), "UTF-8");
-    }
-
-    /**
-     * Run a get request on the specified url.
-     * @param url url
-     * @return response of the request
-     * @throws URISyntaxException
-     * @throws IOException
-     * @throws AuthenticationException
-     */
-    public HttpResponse runGetRequest(String url)
-        throws URISyntaxException, IOException, AuthenticationException, InterruptedException {
-        final BaseRequest request = new BaseRequest(url, "get", null);
-        return request.run();
-    }
-
-    /**
-     * Successfully run a get request on the specified url.
-     * @param url url
-     * @return string response of the request
-     * @throws URISyntaxException
-     * @throws IOException
-     * @throws AuthenticationException
-     */
-    public String runGetRequestSuccessfully(String url)
-        throws URISyntaxException, IOException, AuthenticationException, InterruptedException {
-        HttpResponse response = runGetRequest(url);
-        String responseString = getResponseString(response);
-        LOGGER.info(Util.prettyPrintXmlOrJson(responseString));
-        Assert.assertEquals(response.getStatusLine().getStatusCode(), 200,
-                "The get request was expected to be successful");
-        return responseString;
-    }
-
-    /**
-     * Create a full url for the given lineage endpoint, urlPath and parameter.
-     * @param url        lineage endpoint
-     * @param urlPath    url path to be added to lineage endpoint
-     * @param paramPairs parameters to be passed
-     * @return url string
-     */
-    public String getUrl(final URL url, final String urlPath, final Map<String,
-            String> paramPairs) {
-        Assert.assertNotNull(hostname, "Hostname can't be null.");
-        String hostAndPath = hostname + url.getValue();
-        if (urlPath != null) {
-            hostAndPath += "/" + urlPath;
-        }
-        if (paramPairs != null && paramPairs.size() > 0) {
-            String[] params = new String[paramPairs.size()];
-            int i = 0;
-            for (Map.Entry<String, String> entry : paramPairs.entrySet()) {
-                params[i++] = entry.getKey() + '=' + entry.getValue();
-            }
-            return hostAndPath + "/?" + StringUtils.join(params, "&");
-        }
-        return hostAndPath;
-    }
-
-    /**
-     * Create a full url for the given lineage endpoint, urlPath and parameter.
-     * @param url     lineage endpoint
-     * @param urlPath url path to be added to lineage endpoint
-     * @return url string
-     */
-    public String getUrl(final URL url, final String urlPath) {
-        return getUrl(url, urlPath, null);
-    }
-
-    /**
-     * Create a full url for the given lineage endpoint and parameter.
-     * @param url        lineage endpoint
-     * @param paramPairs parameters to be passed
-     * @return url string
-     */
-    public String getUrl(final URL url, final Map<String, String> paramPairs) {
-        return getUrl(url, null, paramPairs);
-    }
-
-    /**
-     * Create a full url for the given lineage endpoint and parameter.
-     * @param url lineage endpoint
-     * @return url string
-     */
-    public String getUrl(final URL url) {
-        return getUrl(url, null, null);
-    }
-
-    /**
-     * Create url path from parts.
-     * @param pathParts parts of the path
-     * @return url path
-     */
-    public String getUrlPath(String... pathParts) {
-        return StringUtils.join(pathParts, "/");
-    }
-
-    /**
-     * Create url path from parts.
-     * @param oneInt    part of the path
-     * @param pathParts parts of the path
-     * @return url path
-     */
-    public String getUrlPath(int oneInt, String... pathParts) {
-        return oneInt + "/" + getUrlPath(pathParts);
-    }
-
-    /**
-     * Get result of the supplied type for the given url.
-     * @param url url
-     * @return result of the REST request
-     */
-    public <T> T getResultOfType(String url, Class<T> clazz) {
-        String responseString = null;
-        try {
-            responseString = runGetRequestSuccessfully(url);
-        } catch (URISyntaxException | InterruptedException | AuthenticationException | IOException e) {
-            AssertUtil.fail(e);
-        }
-        return new GsonBuilder().create().fromJson(responseString, clazz);
-    }
-
-    /**
-     * Get vertices result for the url.
-     * @param url url
-     * @return result of the REST request
-     */
-    public VerticesResult getVerticesResult(String url) {
-        return getResultOfType(url, VerticesResult.class);
-    }
-
-    /**
-     * Get vertex result for the url.
-     * @param url url
-     * @return result of the REST request
-     */
-    private VertexResult getVertexResult(String url) {
-        return getResultOfType(url, VertexResult.class);
-    }
-
-    /**
-     * Get vertex id result for the url.
-     * @param url url
-     * @return result of the REST request
-     */
-    private VertexIdsResult getVertexIdsResult(String url) {
-        return getResultOfType(url, VertexIdsResult.class);
-    }
-
-    /**
-     * Get all the vertices.
-     * @return all the vertices
-     */
-    public VerticesResult getAllVertices() {
-        return getVerticesResult(getUrl(URL.VERTICES_ALL));
-    }
-
-    public VerticesResult getVertices(Vertex.FilterKey key, String value) {
-        Map<String, String> params = new TreeMap<>();
-        params.put("key", key.toString());
-        params.put("value", value);
-        return getVerticesResult(getUrl(URL.VERTICES, params));
-    }
-
-    public VertexResult getVertexById(int vertexId) {
-        return getVertexResult(getUrl(URL.VERTICES, getUrlPath(vertexId)));
-    }
-
-    public VertexResult getVertexProperties(int vertexId) {
-        return getVertexResult(getUrl(URL.VERTICES_PROPERTIES, getUrlPath(vertexId)));
-    }
-
-    public VerticesResult getVerticesByType(Vertex.VERTEX_TYPE vertexType) {
-        return getVertices(Vertex.FilterKey.type, vertexType.getValue());
-    }
-
-    public VerticesResult getVerticesByName(String name) {
-        return getVertices(Vertex.FilterKey.name, name);
-    }
-
-    public VerticesResult getVerticesByDirection(int vertexId, Direction direction) {
-        Assert.assertTrue((EnumSet.of(Direction.bothCount, Direction.inCount, Direction.outCount,
-                Direction.bothVertices, Direction.inComingVertices,
-                Direction.outgoingVertices).contains(direction)),
-                "Vertices requested.");
-        return getVerticesResult(getUrl(URL.VERTICES, getUrlPath(vertexId, direction.getValue())));
-    }
-
-    public VertexIdsResult getVertexIdsByDirection(int vertexId, Direction direction) {
-        Assert.assertTrue((EnumSet.of(Direction.bothVerticesIds, Direction.incomingVerticesIds,
-                Direction.outgoingVerticesIds).contains(direction)),
-                "Vertex Ids requested.");
-        return getVertexIdsResult(getUrl(URL.VERTICES, getUrlPath(vertexId, direction.getValue())));
-    }
-
-    public Vertex getVertex(String vertexName) {
-        final VerticesResult clusterResult = getVerticesByName(vertexName);
-        GraphAssert.assertVertexSanity(clusterResult);
-        Assert.assertEquals(clusterResult.getTotalSize(), 1,
-                "Expected one node for vertex name:" + vertexName);
-        return clusterResult.getResults().get(0);
-    }
-
-    /**
-     * Get edges result for the url.
-     * @param url url
-     * @return result of the REST request
-     */
-    private EdgesResult getEdgesResult(String url) {
-        return getResultOfType(url, EdgesResult.class);
-    }
-
-    private EdgeResult getEdgeResult(String url) {
-        return getResultOfType(url, EdgeResult.class);
-    }
-
-    public EdgesResult getEdgesByDirection(int vertexId, Direction direction) {
-        Assert.assertTrue((EnumSet.of(Direction.bothEdges, Direction.inComingEdges,
-            Direction.outGoingEdges).contains(direction)), "Vertices requested.");
-        return getEdgesResult(getUrl(URL.VERTICES, getUrlPath(vertexId, direction.getValue())));
-    }
-
-    public EdgesResult getAllEdges() {
-        return getEdgesResult(getUrl(URL.EDGES_ALL));
-    }
-
-    public EdgeResult getEdgeById(String edgeId) {
-        return getEdgeResult(getUrl(URL.EDGES, getUrlPath(edgeId)));
-    }
-}

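For reference, a minimal sketch of how the LineageHelper removed above is typically driven from a regression test; the prism URL and the vertex name below are placeholders, not values taken from this commit.

    import org.apache.falcon.regression.core.helpers.LineageHelper;
    import org.apache.falcon.regression.core.response.lineage.Vertex;
    import org.apache.falcon.regression.core.response.lineage.VerticesResult;

    public class LineageHelperSketch {
        public static void main(String[] args) {
            // Point the helper at a Falcon server exposing /api/metadata/lineage/* (placeholder URL).
            LineageHelper lineage = new LineageHelper("http://falcon-prism.example.com:15000");

            // Fetch every vertex of the lineage graph and report how many were returned.
            VerticesResult allVertices = lineage.getAllVertices();
            System.out.println("Lineage graph vertices: " + allVertices.getTotalSize());

            // Look up the single vertex registered for a given entity name (placeholder name).
            Vertex clusterVertex = lineage.getVertex("my-cluster");
            System.out.println("Found vertex: " + clusterVertex);
        }
    }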
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/AbstractEntityHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/AbstractEntityHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/AbstractEntityHelper.java
deleted file mode 100644
index e1a9288..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/AbstractEntityHelper.java
+++ /dev/null
@@ -1,733 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers.entity;
-
-import com.jcraft.jsch.JSchException;
-import org.apache.commons.exec.CommandLine;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.regression.core.helpers.FalconClientBuilder;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.supportClasses.ExecResult;
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.falcon.regression.core.util.ExecUtil;
-import org.apache.falcon.regression.core.util.FileUtil;
-import org.apache.falcon.regression.core.util.HCatUtil;
-import org.apache.falcon.regression.core.util.HiveUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.core.util.Util.URLS;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.falcon.resource.InstanceDependencyResult;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-import org.apache.falcon.resource.TriageResult;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.common.HCatException;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.List;
-
-/** Abstract class for helper classes. */
-public abstract class AbstractEntityHelper {
-
-    private static final Logger LOGGER = Logger.getLogger(AbstractEntityHelper.class);
-
-    //basic properties
-    private String qaHost;
-    private String hostname = "";
-    private String username = "";
-    private String password = "";
-    private String hadoopLocation = "";
-    private String hadoopURL = "";
-    private String clusterReadonly = "";
-    private String clusterWrite = "";
-    private String oozieURL = "";
-    private String activeMQ = "";
-    private String storeLocation = "";
-    private String colo;
-    private String allColo;
-    private String coloName;
-    //hive jdbc
-    private String hiveJdbcUrl = "";
-    private String hiveJdbcUser = "";
-    private String hiveJdbcPassword = "";
-    private Connection hiveJdbcConnection;
-    //clients
-    private OozieClient oozieClient;
-    private String hcatEndpoint = "";
-    private HCatClient hCatClient;
-    private FileSystem hadoopFS;
-    //other properties
-    private String namenodePrincipal;
-    private String hiveMetaStorePrincipal;
-    private String identityFile;
-    private String serviceUser;
-    private String serviceStartCmd;
-    private String serviceStopCmd;
-    private String serviceStatusMsg;
-    private String serviceStatusCmd;
-
-    public AbstractEntityHelper(String prefix) {
-        if ((null == prefix) || prefix.isEmpty()) {
-            prefix = "";
-        } else {
-            prefix += ".";
-        }
-        this.qaHost = Config.getProperty(prefix + "qa_host");
-        this.hostname = Config.getProperty(prefix + "hostname");
-        this.username = Config.getProperty(prefix + "username", System.getProperty("user.name"));
-        this.password = Config.getProperty(prefix + "password", "");
-        this.hadoopLocation = Config.getProperty(prefix + "hadoop_location");
-        this.hadoopURL = Config.getProperty(prefix + "hadoop_url");
-        this.hcatEndpoint = Config.getProperty(prefix + "hcat_endpoint");
-        this.clusterReadonly = Config.getProperty(prefix + "cluster_readonly");
-        this.clusterWrite = Config.getProperty(prefix + "cluster_write");
-        this.oozieURL = Config.getProperty(prefix + "oozie_url");
-        this.activeMQ = Config.getProperty(prefix + "activemq_url");
-        this.storeLocation = Config.getProperty(prefix + "storeLocation");
-        this.allColo = "?colo=" + Config.getProperty(prefix + "colo", "*");
-        this.colo = (!Config.getProperty(prefix + "colo", "").isEmpty()) ? "?colo=" + Config
-            .getProperty(prefix + "colo") : "";
-        this.coloName = this.colo.contains("=") ? this.colo.split("=")[1] : "";
-        this.serviceStartCmd =
-            Config.getProperty(prefix + "service_start_cmd", "/etc/init.d/tomcat6 start");
-        this.serviceStopCmd = Config.getProperty(prefix + "service_stop_cmd",
-            "/etc/init.d/tomcat6 stop");
-        this.serviceUser = Config.getProperty(prefix + "service_user", null);
-        this.serviceStatusMsg = Config.getProperty(prefix + "service_status_msg",
-            "Tomcat servlet engine is running with pid");
-        this.serviceStatusCmd =
-            Config.getProperty(prefix + "service_status_cmd", "/etc/init.d/tomcat6 status");
-        this.identityFile = Config.getProperty(prefix + "identityFile",
-            System.getProperty("user.home") + "/.ssh/id_rsa");
-        this.hadoopFS = null;
-        this.oozieClient = null;
-        this.namenodePrincipal = Config.getProperty(prefix + "namenode.kerberos.principal", "none");
-        this.hiveMetaStorePrincipal = Config.getProperty(
-            prefix + "hive.metastore.kerberos.principal", "none");
-        this.hiveJdbcUrl = Config.getProperty(prefix + "hive.jdbc.url", "none");
-        this.hiveJdbcUser =
-            Config.getProperty(prefix + "hive.jdbc.user", System.getProperty("user.name"));
-        this.hiveJdbcPassword = Config.getProperty(prefix + "hive.jdbc.password", "");
-    }
-
-    public String getActiveMQ() {
-        return activeMQ;
-    }
-
-    public String getHadoopLocation() {
-        return hadoopLocation;
-    }
-
-    public String getHadoopURL() {
-        return hadoopURL;
-    }
-
-    public String getClusterReadonly() {
-        return clusterReadonly;
-    }
-
-    public String getClusterWrite() {
-        return clusterWrite;
-    }
-
-    public String getHostname() {
-        return hostname;
-    }
-
-    public String getPassword() {
-        return password;
-    }
-
-    public String getStoreLocation() {
-        return storeLocation;
-    }
-
-    public String getUsername() {
-        return username;
-    }
-
-    public String getHCatEndpoint() {
-        return hcatEndpoint;
-    }
-
-    public String getQaHost() {
-        return qaHost;
-    }
-
-    public String getIdentityFile() {
-        return identityFile;
-    }
-
-    public String getServiceUser() {
-        return serviceUser;
-    }
-
-    public String getServiceStopCmd() {
-        return serviceStopCmd;
-    }
-
-    public String getServiceStartCmd() {
-        return serviceStartCmd;
-    }
-
-    public String getColo() {
-        return colo;
-    }
-
-    public String getColoName() {
-        return coloName;
-    }
-
-    public abstract String getEntityType();
-
-    public abstract String getEntityName(String entity);
-
-    public String getNamenodePrincipal() {
-        return namenodePrincipal;
-    }
-
-    public String getHiveMetaStorePrincipal() {
-        return hiveMetaStorePrincipal;
-    }
-
-    public HCatClient getHCatClient() {
-        if (null == this.hCatClient) {
-            try {
-                this.hCatClient = HCatUtil.getHCatClient(hcatEndpoint, hiveMetaStorePrincipal);
-            } catch (HCatException e) {
-                Assert.fail("Unable to create hCatClient because of exception:\n"
-                    + ExceptionUtils.getStackTrace(e));
-            }
-        }
-        return this.hCatClient;
-    }
-
-    public Connection getHiveJdbcConnection() {
-        if (null == hiveJdbcConnection) {
-            try {
-                hiveJdbcConnection =
-                    HiveUtil.getHiveJdbcConnection(hiveJdbcUrl, hiveJdbcUser, hiveJdbcPassword, hiveMetaStorePrincipal);
-            } catch (ClassNotFoundException | SQLException | InterruptedException | IOException e) {
-                Assert.fail("Unable to create hive jdbc connection because of exception:\n"
-                    + ExceptionUtils.getStackTrace(e));
-            }
-        }
-        return hiveJdbcConnection;
-    }
-
-    public OozieClient getOozieClient() {
-        if (null == this.oozieClient) {
-            this.oozieClient = OozieUtil.getClient(this.oozieURL);
-        }
-        return this.oozieClient;
-    }
-
-    public FileSystem getHadoopFS() throws IOException {
-        if (null == this.hadoopFS) {
-            Configuration conf = new Configuration();
-            conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-            conf.set("fs.default.name", "hdfs://" + this.hadoopURL);
-            this.hadoopFS = FileSystem.get(conf);
-        }
-        return this.hadoopFS;
-    }
-
-    private String createUrl(String... parts) {
-        return StringUtils.join(parts, "/");
-    }
-
-    public ServiceResponse listEntities(String entityType, String params, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        if (StringUtils.isEmpty(entityType)) {
-            entityType = getEntityType();
-        }
-        LOGGER.info("fetching " + entityType + " list");
-        String url = createUrl(this.hostname + URLS.LIST_URL.getValue(), entityType + colo);
-        if (StringUtils.isNotEmpty(params)){
-            url += colo.isEmpty() ? "?" + params : "&" + params;
-        }
-        return Util.sendRequest(createUrl(url), "get", null, user);
-    }
-
-    public ServiceResponse listAllEntities()
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        return listAllEntities(null, null);
-    }
-
-    public ServiceResponse listAllEntities(String params, String user)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        return listEntities(null, (params == null ? "" : params + '&')
-            + "numResults=" + Integer.MAX_VALUE, user);
-    }
-
-    public ServiceResponse submitEntity(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return submitEntity(data, null);
-    }
-
-    public ServiceResponse validateEntity(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return validateEntity(data, null);
-    }
-
-    public ServiceResponse submitEntity(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        LOGGER.info("Submitting " + getEntityType() + ": \n" + Util.prettyPrintXml(data));
-        return Util.sendRequest(createUrl(this.hostname + URLS.SUBMIT_URL.getValue(), getEntityType() + colo), "post",
-                data, user);
-    }
-
-    public ServiceResponse validateEntity(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        LOGGER.info("Validating " + getEntityType() + ": \n" + Util.prettyPrintXml(data));
-        return Util.sendRequest(createUrl(this.hostname + URLS.VALIDATE_URL.getValue(), getEntityType() + colo), "post",
-                data, user);
-    }
-
-    public ServiceResponse schedule(String processData)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return schedule(processData, null, "");
-    }
-
-    public ServiceResponse schedule(String data, String user, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-
-        String url = createUrl(this.hostname + URLS.SCHEDULE_URL.getValue(), getEntityType(),
-                getEntityName(data) + colo);
-        if (StringUtils.isNotBlank(params)) {
-            url += (colo.isEmpty() ? "?" : "&") + params;
-        }
-        LOGGER.info("url is : " + url);
-        return Util.sendRequest(createUrl(url), "post", data, user);
-    }
-
-    public ServiceResponse submitAndSchedule(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return submitAndSchedule(data, null, "");
-    }
-
-    public ServiceResponse submitAndSchedule(String data, String user, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        LOGGER.info("Submitting " + getEntityType() + ": \n" + Util.prettyPrintXml(data));
-
-        String url = createUrl(this.hostname + URLS.SUBMIT_AND_SCHEDULE_URL.getValue(), getEntityType() + colo);
-        if (StringUtils.isNotBlank(params)) {
-            url += (colo.isEmpty() ? "?" : "&") + params;
-        }
-        return Util.sendRequest(createUrl(url), "post", data, user);
-    }
-
-    public ServiceResponse deleteByName(String entityName, String user)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        return Util.sendRequest(
-                createUrl(this.hostname + URLS.DELETE_URL.getValue(), getEntityType(), entityName + colo), "delete",
-                user);
-    }
-
-    public ServiceResponse delete(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return delete(data, null);
-    }
-
-    public ServiceResponse delete(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return Util.sendRequest(
-                createUrl(this.hostname + URLS.DELETE_URL.getValue(), getEntityType(), getEntityName(data) + colo),
-                "delete", user);
-    }
-
-    public ServiceResponse suspend(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return suspend(data, null);
-    }
-
-    public ServiceResponse suspend(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return Util.sendRequest(createUrl(this.hostname + URLS.SUSPEND_URL.getValue(),
-            getEntityType(), getEntityName(data) + colo), "post", user);
-    }
-
-    public ServiceResponse resume(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return resume(data, null);
-    }
-
-    public ServiceResponse resume(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return Util.sendRequest(createUrl(this.hostname + URLS.RESUME_URL.getValue(),
-            getEntityType(), getEntityName(data) + colo), "post", user);
-    }
-
-    public ServiceResponse getStatus(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getStatus(data, null);
-    }
-
-    public ServiceResponse getStatus(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return Util.sendRequest(createUrl(this.hostname + URLS.STATUS_URL.getValue(),
-            getEntityType(), getEntityName(data) + colo), "get", user);
-    }
-
-    public ServiceResponse getEntityDefinition(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getEntityDefinition(data, null);
-    }
-
-    public ServiceResponse getEntityDefinition(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return Util.sendRequest(createUrl(this.hostname + URLS.GET_ENTITY_DEFINITION.getValue(),
-            getEntityType(), getEntityName(data) + colo), "get", user);
-    }
-
-    public ServiceResponse getEntityDependencies(String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return Util.sendRequest(
-                createUrl(this.hostname + URLS.DEPENDENCIES.getValue(), getEntityType(), getEntityName(data) + colo),
-                "get", user);
-    }
-
-    public InstancesResult getRunningInstance(String name)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getRunningInstance(name, null);
-    }
-
-    public InstancesResult getRunningInstance(String name, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_RUNNING.getValue(), getEntityType(),
-            name + allColo);
-        return (InstancesResult) InstanceUtil.sendRequestProcessInstance(url, user);
-    }
-
-    public InstancesResult getProcessInstanceStatus(String entityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getProcessInstanceStatus(entityName, params, null);
-    }
-
-    public InstancesResult getProcessInstanceStatus(
-        String entityName, String params, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_STATUS.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public InstancesResult getProcessInstanceLogs(String entityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getProcessInstanceLogs(entityName, params, null);
-    }
-
-    public InstancesResult getProcessInstanceLogs(String entityName, String params,
-                                                  String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_LOGS.getValue(), getEntityType(),
-            entityName);
-        if (StringUtils.isNotEmpty(params)) {
-            url += "?";
-        }
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public InstancesResult getProcessInstanceSuspend(
-        String readEntityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getProcessInstanceSuspend(readEntityName, params, null);
-    }
-
-    public InstancesResult getProcessInstanceSuspend(
-        String entityName, String params, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_SUSPEND.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public ServiceResponse update(String oldEntity, String newEntity)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return update(oldEntity, newEntity, null);
-    }
-
-    public ServiceResponse update(String oldEntity, String newEntity, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        LOGGER.info("Updating " + getEntityType() + ": \n" + Util.prettyPrintXml(oldEntity));
-        LOGGER.info("To " + getEntityType() + ": \n" + Util.prettyPrintXml(newEntity));
-        String url = createUrl(this.hostname + URLS.UPDATE.getValue(), getEntityType(),
-            getEntityName(oldEntity));
-        return Util.sendRequest(url + colo, "post", newEntity, user);
-    }
-
-    public InstancesResult getProcessInstanceKill(String readEntityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getProcessInstanceKill(readEntityName, params, null);
-    }
-
-    public InstancesResult getProcessInstanceKill(String entityName, String params,
-                                                         String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_KILL.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public InstancesResult getProcessInstanceRerun(String entityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getProcessInstanceRerun(entityName, params, null);
-    }
-
-    public InstancesResult getProcessInstanceRerun(String entityName, String params,
-                                                          String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_RERUN.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public InstancesResult getProcessInstanceResume(String entityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getProcessInstanceResume(entityName, params, null);
-    }
-
-    public InstancesResult getProcessInstanceResume(String entityName, String params,
-                                                           String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_RESUME.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public FeedInstanceResult getFeedInstanceListing(String entityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return getFeedInstanceListing(entityName, params, null);
-    }
-
-    public FeedInstanceResult getFeedInstanceListing(String entityName, String params,
-                                                     String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_LISTING.getValue(), getEntityType(),
-                entityName, "");
-        return (FeedInstanceResult) InstanceUtil
-                .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    public InstancesSummaryResult getInstanceSummary(String entityName, String params)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_SUMMARY.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesSummaryResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, null);
-    }
-
-    public List<String> getArchiveInfo() throws IOException, JSchException {
-        return Util.getStoreInfo(this, "/archive/" + getEntityType().toUpperCase());
-    }
-
-    public List<String> getStoreInfo() throws IOException, JSchException {
-        return Util.getStoreInfo(this, "/" + getEntityType().toUpperCase());
-    }
-
-    public InstancesResult getInstanceParams(String entityName, String params)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_PARAMS.getValue(), getEntityType(),
-            entityName, "");
-        return (InstancesResult) InstanceUtil
-            .createAndSendRequestProcessInstance(url, params, allColo, null);
-    }
-
-    /**
-     * Retrieves instance triage.
-     */
-    public TriageResult getInstanceTriage(String entityName, String params)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_TRIAGE.getValue(), getEntityType(), entityName);
-        return (TriageResult) InstanceUtil.createAndSendRequestProcessInstance(url, params, allColo, null);
-    }
-
-    /**
-     * Lists all entities which are tagged by a given pipeline.
-     * @param pipeline filter
-     * @return service response
-     * @throws AuthenticationException
-     * @throws IOException
-     * @throws URISyntaxException
-     */
-    public ServiceResponse getListByPipeline(String pipeline)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.LIST_URL.getValue() + "/" + getEntityType());
-        url += "?filterBy=PIPELINES:" + pipeline;
-        return Util.sendRequest(url, "get", null, null);
-    }
-
-    /**
-     * Submit an entity through falcon client.
-     * @param entityStr string of the entity to be submitted
-     * @throws IOException
-     */
-    public ExecResult clientSubmit(final String entityStr) throws IOException {
-        LOGGER.info("Submitting " + getEntityType() + " through falcon client: \n"
-            + Util.prettyPrintXml(entityStr));
-        final String fileName = FileUtil.writeEntityToFile(entityStr);
-        final CommandLine commandLine = FalconClientBuilder.getBuilder()
-                .getSubmitCommand(getEntityType(), fileName).build();
-        return ExecUtil.executeCommand(commandLine);
-    }
-
-    /**
-     * Get CLI metrics for recipe based process or feed replication.
-     * @param entityName name of the entity whose metrics are requested
-     * @return result of executing the falcon client metrics command
-     */
-    public ExecResult getCLIMetrics(String entityName) {
-        LOGGER.info("Getting CLI metrics for " + getEntityType()+ " " + entityName);
-        final CommandLine commandLine = FalconClientBuilder.getBuilder()
-            .getMetricsCommand(getEntityType(), entityName).build();
-        return ExecUtil.executeCommand(commandLine);
-    }
-
-    /**
-     * Delete an entity through falcon client.
-     * @param entityStr string of the entity to be submitted
-     * @throws IOException
-     */
-    public ExecResult clientDelete(final String entityStr, String user) throws IOException {
-        final String entityName = getEntityName(entityStr);
-        LOGGER.info("Deleting " + getEntityType() + ": " + entityName);
-        final CommandLine commandLine = FalconClientBuilder.getBuilder(user)
-                .getDeleteCommand(getEntityType(), entityName).build();
-        return ExecUtil.executeCommand(commandLine);
-    }
-
-    /**
-     * Retrieves entities summary.
-     * @param clusterName compulsory parameter for request
-     * @param params list of optional parameters
-     * @return entity summary along with its instances.
-     */
-    public ServiceResponse getEntitySummary(String clusterName, String params)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.ENTITY_SUMMARY.getValue(),
-            getEntityType()) +"?cluster=" + clusterName;
-        if (StringUtils.isNotEmpty(params)) {
-            url += "&" + params;
-        }
-        return Util.sendRequest(url, "get", null, null);
-    }
-
-    /**
-     * Get list of all instances of a given entity.
-     * @param entityName entity name
-     * @param params list of optional parameters
-     * @param user user name
-     * @return response
-     */
-    public InstancesResult listInstances(String entityName, String params, String user)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_LIST.getValue(), getEntityType(),
-            entityName + colo);
-        if (StringUtils.isNotEmpty(params)) {
-            url += colo.isEmpty() ? "?" + params : "&" + params;
-        }
-        return (InstancesResult) InstanceUtil.sendRequestProcessInstance(url, user);
-    }
-
-    /**
-     * Get list of all dependencies of a given entity.
-     * @param entityName entity name
-     * @return response
-     * @throws URISyntaxException
-     * @throws AuthenticationException
-     * @throws InterruptedException
-     * @throws IOException
-     */
-    public ServiceResponse getDependencies(String entityName)
-        throws URISyntaxException, AuthenticationException, InterruptedException, IOException {
-        String url = createUrl(this.hostname + URLS.DEPENDENCIES.getValue(), getEntityType(), entityName + colo);
-        return Util.sendRequest(url, "get", null, null);
-    }
-
-    public ServiceResponse touchEntity(String data)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return touchEntity(Util.readEntityName(data), data, null);
-    }
-
-    public ServiceResponse touchEntity(String entityName, String data, String user)
-        throws AuthenticationException, IOException, URISyntaxException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.TOUCH_URL.getValue(), getEntityType(),
-                entityName + colo);
-        return Util.sendRequest(url, "post", data, user);
-    }
-
-    /**
-     * Retrieves entities lineage.
-     * @param params list of optional parameters
-     * @return entity lineage for the given pipeline.
-     */
-    public ServiceResponse getEntityLineage(String params)
-        throws URISyntaxException, AuthenticationException, InterruptedException, IOException {
-        String url = createUrl(this.hostname + URLS.ENTITY_LINEAGE.getValue(), colo);
-        if (StringUtils.isNotEmpty(params)){
-            url += colo.isEmpty() ? "?" + params : "&" + params;
-        }
-        return Util.sendJSONRequest(createUrl(url), "get", null, null);
-    }
-
-    /**
-     * Retrieves instance dependencies.
-     */
-    public InstanceDependencyResult getInstanceDependencies(
-            String entityName, String params, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        String url = createUrl(this.hostname + URLS.INSTANCE_DEPENDENCIES.getValue(), getEntityType(), entityName, "");
-        return (InstanceDependencyResult) InstanceUtil
-                .createAndSendRequestProcessInstance(url, params, allColo, user);
-    }
-
-    /**
-     * Retrieves sla alerts.
-     * @param params list of optional parameters
-     * @return instances with sla missed.
-     */
-    public ServiceResponse getSlaAlert(String params)
-        throws URISyntaxException, AuthenticationException, InterruptedException, IOException {
-        String url = createUrl(this.hostname + URLS.SLA.getValue(),
-                getEntityType());
-        if (StringUtils.isNotEmpty(params)) {
-            url +=  params;
-        }
-        return Util.sendJSONRequest(createUrl(url), "get", null, null);
-    }
-
-}

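For reference, a rough sketch of how a concrete subclass of the AbstractEntityHelper removed above (ProcessEntityHelper, removed further below) is typically used; the configuration prefix and the process XML are placeholders, and in a real test the XML would come from a bundle on disk.

    import org.apache.falcon.regression.core.helpers.entity.ProcessEntityHelper;
    import org.apache.falcon.regression.core.response.ServiceResponse;

    public class ProcessHelperSketch {
        public static void main(String[] args) throws Exception {
            // The prefix selects one "<prefix>.*" block of Merlin's configuration (placeholder prefix).
            ProcessEntityHelper processHelper = new ProcessEntityHelper("cluster1");

            // In a real test this is a complete process entity XML read from a bundle (placeholder here).
            String processXml = "...";

            // Submit and schedule the process, check its status, then clean up.
            ServiceResponse scheduled = processHelper.submitAndSchedule(processXml);
            System.out.println("submitAndSchedule response: " + scheduled);

            ServiceResponse status = processHelper.getStatus(processXml);
            System.out.println("status response: " + status);

            processHelper.delete(processXml);
        }
    }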
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ClusterEntityHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ClusterEntityHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ClusterEntityHelper.java
deleted file mode 100644
index acc01fa..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ClusterEntityHelper.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers.entity;
-
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-/** Helper class to work with cluster endpoints of a colo. */
-public class ClusterEntityHelper extends AbstractEntityHelper {
-
-
-    private static final String INVALID_ERR = "Not Valid for Cluster Entity";
-
-    public ClusterEntityHelper(String prefix) {
-        super(prefix);
-    }
-
-    public String getEntityType() {
-        return "cluster";
-    }
-
-    public String getEntityName(String entity) {
-        return Util.readEntityName(entity);
-    }
-
-    public ServiceResponse getStatus(String data, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    public ServiceResponse resume(String data, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    public ServiceResponse schedule(String data, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    public ServiceResponse submitAndSchedule(String data, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    public ServiceResponse suspend(String data, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public InstancesResult getRunningInstance(String name, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public InstancesResult getProcessInstanceStatus(
-        String readEntityName, String params, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-
-    public InstancesResult getProcessInstanceSuspend(
-        String readEntityName, String params, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public ServiceResponse update(String oldEntity, String newEntity, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public InstancesResult getProcessInstanceKill(String readEntityName,
-                                                         String string, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public InstancesResult getProcessInstanceRerun(
-        String readEntityName, String string, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public InstancesResult getProcessInstanceResume(
-        String readEntityName, String string, String user) {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public InstancesSummaryResult getInstanceSummary(String readEntityName, String string)
-        throws IOException, URISyntaxException {
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-
-    @Override
-    public ServiceResponse getListByPipeline(String pipeline){
-        throw new UnsupportedOperationException(INVALID_ERR);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/EntityHelperFactory.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/EntityHelperFactory.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/EntityHelperFactory.java
deleted file mode 100644
index 7b62656..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/EntityHelperFactory.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers.entity;
-
-import org.apache.falcon.entity.v0.EntityType;
-
-/** Factory class to create helper objects. */
-public final class EntityHelperFactory {
-    private EntityHelperFactory() {
-    }
-
-    public static AbstractEntityHelper getEntityHelper(EntityType type, String prefix) {
-        switch (type) {
-        case FEED:
-            return new FeedEntityHelper(prefix);
-        case CLUSTER:
-            return new ClusterEntityHelper(prefix);
-        case PROCESS:
-            return new ProcessEntityHelper(prefix);
-        default:
-            return null;
-        }
-    }
-}

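For reference, a short sketch of how the EntityHelperFactory removed above is typically used to obtain a helper for a given entity type; the "prism" configuration prefix is a placeholder assumption.

    import org.apache.falcon.entity.v0.EntityType;
    import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
    import org.apache.falcon.regression.core.helpers.entity.EntityHelperFactory;

    public class EntityHelperFactorySketch {
        public static void main(String[] args) {
            // Ask the factory for a feed helper bound to the "prism" configuration prefix (placeholder).
            AbstractEntityHelper feedHelper = EntityHelperFactory.getEntityHelper(EntityType.FEED, "prism");

            // The returned helper exposes the generic REST operations declared on AbstractEntityHelper.
            System.out.println("Helper for entity type: " + feedHelper.getEntityType());
            System.out.println("Falcon server: " + feedHelper.getHostname());
        }
    }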
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/FeedEntityHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/FeedEntityHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/FeedEntityHelper.java
deleted file mode 100644
index 437f997..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/FeedEntityHelper.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers.entity;
-
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.Util;
-
-/** Helper class to work with feed endpoints of a colo. */
-public class FeedEntityHelper extends AbstractEntityHelper {
-
-    public FeedEntityHelper(String prefix) {
-        super(prefix);
-    }
-
-    public String getEntityType() {
-        return "feed";
-    }
-
-    public String getEntityName(String entity) {
-        return Util.readEntityName(entity);
-    }
-
-    @Override
-    public ServiceResponse getListByPipeline(String pipeline){
-        throw new UnsupportedOperationException("Not valid for Feed Entity.");
-    }
-}
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ProcessEntityHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ProcessEntityHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ProcessEntityHelper.java
deleted file mode 100644
index 76ad638..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/helpers/entity/ProcessEntityHelper.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.helpers.entity;
-
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-
-/** Helper class to work with process endpoints of a colo. */
-public class ProcessEntityHelper extends AbstractEntityHelper {
-
-    public ProcessEntityHelper(String prefix) {
-        super(prefix);
-    }
-
-    public String getEntityType() {
-        return "process";
-    }
-
-    public String getEntityName(String entity) {
-        return new ProcessMerlin(entity).getName();
-    }
-
-}
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/ServiceResponse.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/ServiceResponse.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/ServiceResponse.java
deleted file mode 100644
index f66d426..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/ServiceResponse.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response;
-
-import com.google.gson.GsonBuilder;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.resource.EntitySummaryResult;
-import org.apache.falcon.resource.LineageGraphResult;
-import org.apache.falcon.resource.SchedulableEntityInstanceResult;
-import org.apache.http.HttpResponse;
-import org.apache.log4j.Logger;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import java.io.IOException;
-import java.io.StringReader;
-
-/** Class to represent falcon's response to a rest request. */
-public class ServiceResponse {
-    private static final Logger LOGGER = Logger.getLogger(ServiceResponse.class);
-
-    private String message;
-    private int code;
-    private HttpResponse response;
-
-    public int getCode() {
-        return code;
-    }
-
-    public void setCode(int code) {
-        this.code = code;
-    }
-
-    public String getMessage() {
-        return message;
-    }
-
-    public void setMessage(String message) {
-        this.message = message;
-    }
-
-    public HttpResponse getResponse() {
-        return response;
-    }
-
-    public void setResponse(HttpResponse response) {
-        this.response = response;
-    }
-
-    public ServiceResponse(String message, int code) {
-        this.message = message;
-        this.code = code;
-    }
-
-    public ServiceResponse(HttpResponse response) throws IOException {
-        this.message = IOUtils.toString(response.getEntity().getContent());
-        this.code = response.getStatusLine().getStatusCode();
-        this.response = response;
-
-        LOGGER.info("The web service response is:\n" + Util.prettyPrintXmlOrJson(message));
-    }
-
-    public ServiceResponse() {
-    }
-
-    /**
-     * Retrieves an EntityList from the response message if possible.
-     * @return EntityList, or null if unmarshalling fails
-     */
-    public EntityList getEntityList(){
-        try {
-            JAXBContext jc = JAXBContext.newInstance(EntityList.class);
-            Unmarshaller u = jc.createUnmarshaller();
-            return  (EntityList) u.unmarshal(new StringReader(message));
-        } catch (JAXBException e) {
-            LOGGER.info("getEntityList() failed:\n" + ExceptionUtils.getStackTrace(e));
-            return null;
-        }
-    }
-
-    /**
-     * Retrieves EntitySummaryResult from a message if possible.
-     * @return EntitySummaryResult, or null if unmarshalling fails
-     */
-    public EntitySummaryResult getEntitySummaryResult() {
-        try {
-            JAXBContext jc = JAXBContext.newInstance(EntitySummaryResult.class);
-            Unmarshaller u = jc.createUnmarshaller();
-            return  (EntitySummaryResult) u.unmarshal(new StringReader(message));
-        } catch (JAXBException e) {
-            LOGGER.info("getEntitySummaryResult() failed:\n" + ExceptionUtils.getStackTrace(e));
-            return null;
-        }
-    }
-
-    /**
-     * Retrieves LineageGraphResult from a message if possible.
-     * @return LineageGraphResult
-     */
-    public LineageGraphResult getLineageGraphResult() {
-        LineageGraphResult lineageGraphResult = new GsonBuilder().create().fromJson(message, LineageGraphResult.class);
-        return lineageGraphResult;
-    }
-
-    /**
-     * Retrieves SchedulableEntityInstanceResult from a message if possible.
-     * @return SchedulableEntityInstanceResult
-     */
-    public SchedulableEntityInstanceResult getSlaResult() {
-        SchedulableEntityInstanceResult schedulableEntityInstanceResult = new GsonBuilder().create().fromJson(message,
-                SchedulableEntityInstanceResult.class);
-        return schedulableEntityInstanceResult;
-    }
-}
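
The three XML accessors above repeat one JAXB unmarshalling pattern; the generic helper below is a sketch of that pattern only, using nothing beyond the standard JAXB API (the class and method names are illustrative, not part of the removed sources):

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Unmarshaller;
import java.io.StringReader;

/** Illustrative sketch of the unmarshalling pattern used by ServiceResponse. */
public final class JaxbUnmarshalSketch {
    private JaxbUnmarshalSketch() {
    }

    /** Unmarshal an XML payload into the given JAXB-annotated class, returning null on failure. */
    public static <T> T unmarshalOrNull(String xml, Class<T> clazz) {
        try {
            JAXBContext jc = JAXBContext.newInstance(clazz);
            Unmarshaller u = jc.createUnmarshaller();
            return clazz.cast(u.unmarshal(new StringReader(xml)));
        } catch (JAXBException e) {
            // ServiceResponse logs the stack trace at this point; a plain sketch just signals failure with null.
            return null;
        }
    }
}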

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Direction.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Direction.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Direction.java
deleted file mode 100644
index 56e3d7c..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Direction.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-/** Enum for all the direction values associated with edges. */
-public enum Direction {
-    outGoingEdges("outE"),
-    inComingEdges("inE"),
-    bothEdges("bothE"),
-    outgoingVertices("out"),
-    inComingVertices("in"),
-    bothVertices("both"),
-    outCount("outCount"),
-    inCount("inCount"),
-    bothCount("bothCount"),
-    outgoingVerticesIds("outIds"),
-    incomingVerticesIds("inIds"),
-    bothVerticesIds("bothIds");
-
-    private final String value;
-
-    Direction(String value) {
-        this.value = value;
-    }
-
-    public String getValue() {
-        return value;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Edge.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Edge.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Edge.java
deleted file mode 100644
index f1c408e..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Edge.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import com.google.gson.annotations.SerializedName;
-
-/** Class for representing an edge. */
-public class Edge extends GraphEntity {
-
-    /** Class for representing different labels of edge. */
-    public static enum LabelType {
-        @SerializedName("stored-in")STORED_IN,
-        @SerializedName("runs-on")RUNS_ON,
-        @SerializedName("input")INPUT,
-        @SerializedName("output")OUTPUT,
-
-        @SerializedName("instance-of")INSTANCE_ENTITY_EDGE,
-
-        @SerializedName("collocated")CLUSTER_COLO,
-        @SerializedName("owned-by")OWNED_BY,
-        @SerializedName("grouped-as")GROUPS,
-
-        @SerializedName("pipeline")PIPELINES,
-
-        // replication labels
-        @SerializedName("replicated-to")FEED_CLUSTER_REPLICATED_EDGE,
-
-        // eviction labels
-        @SerializedName("evicted-from")FEED_CLUSTER_EVICTED_EDGE,
-
-        //custom labels for test tags
-        @SerializedName("test")TEST,
-        @SerializedName("testname")TESTNAME,
-        @SerializedName("first")FIRST,
-        @SerializedName("second")SECOND,
-        @SerializedName("third")THIRD,
-        @SerializedName("fourth")FOURTH,
-        @SerializedName("fifth")FIFTH,
-        @SerializedName("sixth")SIXTH,
-        @SerializedName("seventh")SEVENTH,
-        @SerializedName("eighth")EIGHTH,
-        @SerializedName("ninth")NINTH,
-        @SerializedName("tenth")TENTH,
-        @SerializedName("value")VALUE,
-        @SerializedName("_falcon_mirroring_type")MIRRORING_TYPE,
-        @SerializedName("specific")SPECIFIC,
-        @SerializedName("myTag1")MY_TAG
-    }
-    @SerializedName("_id")
-    private String id;
-
-    @SerializedName("_outV")
-    private int outV;
-
-    @SerializedName("_inV")
-    private int inV;
-
-    @SerializedName("_label")
-    private LabelType label;
-
-    public String getId() {
-        return id;
-    }
-
-    public int getOutV() {
-        return outV;
-    }
-
-    public int getInV() {
-        return inV;
-    }
-
-    public LabelType getLabel() {
-        return label;
-    }
-
-    @Override
-    public String toString() {
-        return "Edge{"
-                + "id='" + id + '\''
-                + ", outV=" + outV
-                + ", inV=" + inV
-                + ", label=" + label
-                + '}';
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgeResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgeResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgeResult.java
deleted file mode 100644
index 9a062e0..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgeResult.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-/** Class for Lineage API result having an edge. */
-public class EdgeResult {
-    private Edge results;
-
-    public Edge getResults() {
-        return results;
-    }
-
-    @Override
-    public String toString() {
-        return "EdgeResult{" + "results=" + results + '}';
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgesResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgesResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgesResult.java
deleted file mode 100644
index 5fdbd89..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/EdgesResult.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/** Class for Lineage API result having edges. */
-public class EdgesResult extends GraphResult {
-    private List<Edge> results;
-
-    public List<Edge> getResults() {
-        return results;
-    }
-
-    @Override
-    public String toString() {
-        return String.format("EdgesResult{totalSize=%d, results=%s}", totalSize, results);
-    }
-
-    public List<Edge> filterByType(Edge.LabelType edgeLabel) {
-        return filterEdgesByType(results, edgeLabel);
-    }
-
-    public List<Edge> filterEdgesByType(List<Edge> edges, Edge.LabelType edgeLabel) {
-        final List<Edge> result = new ArrayList<>();
-        for (Edge edge : edges) {
-            if (edge.getLabel() == edgeLabel) {
-                result.add(edge);
-            }
-        }
-        return result;
-    }
-
-}
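
A short sketch of how a payload in the shape above is deserialized with Gson and then narrowed with the filter; the JSON literal is hand-written sample data, not output captured from a Falcon server:

import com.google.gson.Gson;

import org.apache.falcon.regression.core.response.lineage.Edge;
import org.apache.falcon.regression.core.response.lineage.EdgesResult;

public final class EdgesResultSketch {
    private EdgesResultSketch() {
    }

    public static void main(String[] args) {
        // Hand-written sample in the lineage API's edge-list shape (ids and labels are illustrative).
        String json = "{\"totalSize\":2,\"results\":["
                + "{\"_id\":\"e1\",\"_outV\":1,\"_inV\":2,\"_type\":\"edge\",\"_label\":\"stored-in\"},"
                + "{\"_id\":\"e2\",\"_outV\":1,\"_inV\":3,\"_type\":\"edge\",\"_label\":\"runs-on\"}]}";
        EdgesResult result = new Gson().fromJson(json, EdgesResult.class);
        // Keep only the edges carrying the 'stored-in' label.
        for (Edge edge : result.filterByType(Edge.LabelType.STORED_IN)) {
            System.out.println(edge);
        }
    }
}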

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphEntity.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphEntity.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphEntity.java
deleted file mode 100644
index 87ca65b..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphEntity.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import com.google.gson.annotations.SerializedName;
-
-/** Abstract class for graph entities. */
-public abstract class GraphEntity {
-    @SerializedName("_type")
-    protected NODE_TYPE nodeType;
-
-    public NODE_TYPE getNodeType() {
-        return nodeType;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphResult.java
deleted file mode 100644
index aba5c4c..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/GraphResult.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import java.util.List;
-
-/** Abstract class for representing a result of lineage api call.*/
-public abstract class GraphResult {
-    protected int totalSize;
-
-    public int getTotalSize() {
-        return totalSize;
-    }
-
-    public abstract List<?> getResults();
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/NODE_TYPE.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/NODE_TYPE.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/NODE_TYPE.java
deleted file mode 100644
index da2132e..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/NODE_TYPE.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import com.google.gson.annotations.SerializedName;
-
-/** Enum for all the allowed node types. */
-public enum NODE_TYPE {
-    @SerializedName("vertex")VERTEX,
-    @SerializedName("edge")EDGE,
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Vertex.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Vertex.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Vertex.java
deleted file mode 100644
index c947dac..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/Vertex.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import com.google.gson.annotations.SerializedName;
-
-/** Class for representing a vertex. */
-public class Vertex extends GraphEntity {
-
-    /** Enum for all the allowed filter keys. */
-    public static enum FilterKey {
-        name, type, timestamp, version,
-        userWorkflowEngine, userWorkflowName, userWorkflowVersion,
-        workflowId, runId, status, workflowEngineUrl, subflowId,
-    }
-
-    /** Enum for all the allowed vertex types. */
-    public static enum VERTEX_TYPE {
-        @SerializedName("cluster-entity")CLUSTER_ENTITY("cluster-entity"),
-        @SerializedName("feed-entity")FEED_ENTITY("feed-entity"),
-        @SerializedName("process-entity")PROCESS_ENTITY("process-entity"),
-
-        @SerializedName("feed-instance")FEED_INSTANCE("feed-instance"),
-        @SerializedName("process-instance")PROCESS_INSTANCE("process-instance"),
-
-        @SerializedName("user")USER("user"),
-        @SerializedName("data-center")COLO("data-center"),
-        @SerializedName("classification")TAGS("classification"),
-        @SerializedName("group")GROUPS("group"),
-        @SerializedName("pipelines")PIPELINES("pipelines");
-
-        private final String value;
-        VERTEX_TYPE(String value) {
-            this.value = value;
-        }
-
-        public String getValue() {
-            return value;
-        }
-    }
-
-    @SerializedName("_id")
-    private int id;
-    private String name;
-
-    private VERTEX_TYPE type;
-    private String timestamp;
-    private String version;
-
-    private String userWorkflowEngine;
-    private String userWorkflowName;
-    private String userWorkflowVersion;
-
-    private String workflowId;
-    private String runId;
-    private String status;
-    private String workflowEngineUrl;
-    private String subflowId;
-
-    public int getId() {
-        return id;
-    }
-
-    public String getTimestamp() {
-        return timestamp;
-    }
-
-    public VERTEX_TYPE getType() {
-        return type;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public String getNominalTime() {
-        return name.split("/")[1];
-    }
-
-    @Override
-    public String toString() {
-        return "Vertex{"
-                + "id=" + id
-                + ", nodeType=" + nodeType
-                + ", name='" + name + '\''
-                + ", type=" + type
-                + ", timestamp='" + timestamp + '\''
-                + ", version='" + version + '\''
-                + ", userWorkflowEngine='" + userWorkflowEngine + '\''
-                + ", userWorkflowName='" + userWorkflowName + '\''
-                + ", userWorkflowVersion='" + userWorkflowVersion + '\''
-                + ", workflowId='" + workflowId + '\''
-                + ", runId='" + runId + '\''
-                + ", status='" + status + '\''
-                + ", workflowEngineUrl='" + workflowEngineUrl + '\''
-                + ", subflowId='" + subflowId + '\''
-                + '}';
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (!(o instanceof Vertex)) {
-            return false;
-        }
-
-        Vertex vertex = (Vertex) o;
-
-        if (id != vertex.id || !name.equals(vertex.name)
-                || (runId != null ? !runId.equals(vertex.runId) : vertex.runId != null)
-                || (status != null ? !status.equals(vertex.status) : vertex.status != null)
-                || (subflowId != null ? !subflowId.equals(vertex.subflowId)
-                        : vertex.subflowId != null)
-                || !timestamp.equals(vertex.timestamp)
-                || type != vertex.type
-                || (userWorkflowEngine != null
-                        ? !userWorkflowEngine.equals(vertex.userWorkflowEngine)
-                        : vertex.userWorkflowEngine != null)
-                || (userWorkflowName != null ? !userWorkflowName.equals(vertex.userWorkflowName)
-                        : vertex.userWorkflowName != null)
-                || (userWorkflowVersion != null
-                        ? !userWorkflowVersion.equals(vertex.userWorkflowVersion)
-                        : vertex.userWorkflowVersion != null)
-                || (version != null ? !version.equals(vertex.version) : vertex.version != null)
-                || (workflowEngineUrl != null
-                        ? !workflowEngineUrl.equals(vertex.workflowEngineUrl)
-                        : vertex.workflowEngineUrl != null)
-                || (workflowId != null ? !workflowId.equals(vertex.workflowId)
-                        : vertex.workflowId != null)) {
-            return false;
-        }
-
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = id;
-        result = 31 * result + name.hashCode();
-        result = 31 * result + type.hashCode();
-        result = 31 * result + timestamp.hashCode();
-        result = 31 * result + (version != null ? version.hashCode() : 0);
-        result = 31 * result + (userWorkflowEngine != null ? userWorkflowEngine.hashCode() : 0);
-        result = 31 * result + (userWorkflowName != null ? userWorkflowName.hashCode() : 0);
-        result = 31 * result + (userWorkflowVersion != null ? userWorkflowVersion.hashCode() : 0);
-        result = 31 * result + (workflowId != null ? workflowId.hashCode() : 0);
-        result = 31 * result + (runId != null ? runId.hashCode() : 0);
-        result = 31 * result + (status != null ? status.hashCode() : 0);
-        result = 31 * result + (workflowEngineUrl != null ? workflowEngineUrl.hashCode() : 0);
-        result = 31 * result + (subflowId != null ? subflowId.hashCode() : 0);
-        return result;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexIdsResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexIdsResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexIdsResult.java
deleted file mode 100644
index 4279bdd..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexIdsResult.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import java.util.List;
-
-/** Class for Lineage API result having vertex ids. */
-public class VertexIdsResult extends GraphResult {
-    private List<Integer> results;
-
-    public List<Integer> getResults() {
-        return results;
-    }
-
-    @Override
-    public String toString() {
-        return String.format("VertexIdsResult{totalSize=%d, results=%s}", totalSize, results);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexResult.java
deleted file mode 100644
index 6d419bc..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VertexResult.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-/** Class for Lineage API result having one vertex. */
-public class VertexResult {
-    private Vertex results;
-
-    public Vertex getResults() {
-        return results;
-    }
-}


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/InstancePage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/InstancePage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/InstancePage.java
deleted file mode 100644
index 48193a2..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/InstancePage.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.PageFactory;
-
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Set;
-
-
-/**
- * Class representation of the Search UI instance page.
- */
-public class InstancePage extends AbstractSearchPage {
-    private final String nominalTime;
-
-    /**
-     * Possible instance actions available on instance page.
-     */
-    public enum Button {
-        Resume,
-        Rerun,
-        Suspend,
-        Kill
-    }
-
-    public InstancePage(WebDriver driver) {
-        super(driver);
-        nominalTime = driver.findElement(By.xpath("//h3")).getText().split("\\|")[1].trim();
-    }
-
-
-    @FindBys({
-            @FindBy(className = "detailsBox"),
-            @FindBy(className = "row")
-    })
-    private WebElement detailsBox;
-
-    @FindBys({
-            @FindBy(xpath = "//h3/a")
-    })
-    private WebElement entityLink;
-
-
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(detailsBox, "Details box");
-        UIAssert.assertDisplayed(entityLink, "Link to parent entity");
-    }
-
-    public InstancePage refreshPage() {
-        return backToEntityPage().openInstance(nominalTime);
-    }
-
-    public String getStatus() {
-        return driver.findElement(By.xpath("//h4[@class='instance-title']/span")).getText();
-    }
-
-    public String getEntityName() {
-        return entityLink.getText();
-    }
-
-    public boolean isLineagePresent() {
-        List<WebElement> lineage = driver.findElements(By.className("lineage-graph"));
-        return !lineage.isEmpty() && lineage.get(0).isDisplayed();
-    }
-
-
-    public Set<Button> getButtons(boolean active) {
-        List<WebElement> buttons = detailsBox.findElement(By.className("buttonCell"))
-                .findElements(By.className("btn"));
-        Set<Button> result = EnumSet.noneOf(Button.class);
-        for (WebElement button : buttons) {
-            if ((button.getAttribute("disabled") == null) == active) {
-                result.add(Button.valueOf(button.getText()));
-            }
-        }
-        return result;
-    }
-
-    public void clickButton(Button button) {
-        detailsBox.findElement(By.className("buttonCell"))
-                .findElements(By.className("btn")).get(button.ordinal()).click();
-        waitForAngularToFinish();
-    }
-
-    public EntityPage backToEntityPage() {
-        entityLink.click();
-        return PageFactory.initElements(driver, EntityPage.class);
-    }
-}
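
A brief sketch of how the button helpers above combine in a test, assuming an already constructed InstancePage; the suspend-then-resume check and the sketch class name are illustrative, not taken from the removed tests:

import java.util.Set;

import org.apache.falcon.regression.ui.search.InstancePage;

public final class InstanceActionsSketch {
    private InstanceActionsSketch() {
    }

    /** Suspend a running instance and verify that Resume becomes an active action. */
    public static void suspendAndCheck(InstancePage instancePage) {
        // getButtons(true) returns only the actions currently enabled on the details box.
        Set<InstancePage.Button> active = instancePage.getButtons(true);
        if (active.contains(InstancePage.Button.Suspend)) {
            instancePage.clickButton(InstancePage.Button.Suspend);
            instancePage = instancePage.refreshPage();
            if (!instancePage.getButtons(true).contains(InstancePage.Button.Resume)) {
                throw new AssertionError("Resume should be active after suspending the instance");
            }
        }
    }
}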

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/LoginPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/LoginPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/LoginPage.java
deleted file mode 100644
index 5b261fb..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/LoginPage.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.PageFactory;
-import org.testng.Assert;
-
-/** Page object for the Login Page. */
-public class LoginPage extends AbstractSearchPage {
-    private static final Logger LOGGER = Logger.getLogger(LoginPage.class);
-    public static final String UI_DEFAULT_USER = MerlinConstants.CURRENT_USER_NAME;
-
-    public LoginPage(WebDriver driver) {
-        super(driver);
-    }
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "login")
-    })
-    protected WebElement loginElem;
-
-    public static LoginPage open(WebDriver driver) {
-        driver.get(UI_URL);
-        LOGGER.info("Opened a URL: " + UI_URL);
-        return PageFactory.initElements(driver, LoginPage.class);
-    }
-
-    private WebElement getUserTextBox() {
-        return loginElem.findElement(By.xpath("//input[@name='user']"));
-    }
-
-    public void appendToUserName(String text) {
-        getUserTextBox().sendKeys(text);
-    }
-
-    public String getUserVisibleWarning() {
-        final WebElement userTextBox = getUserTextBox();
-
-        final WebElement userWarnLabel = getParentElement(userTextBox).findElement(
-            By.xpath("//label[contains(@class, 'custom-danger') and contains(@class, 'validationMessageGral')]"));
-        if (userWarnLabel.isDisplayed()) {
-            return userWarnLabel.getText();
-        }
-        return "";
-    }
-
-    /** Try to login by pressing the login button. */
-    public void tryLogin() {
-        LOGGER.info("Trying to login.");
-        final WebElement loginButton = loginElem.findElement(By.id("login.submit"));
-        UIAssert.assertDisplayed(loginButton, "Login button");
-        loginButton.click();
-    }
-
-    /** Login successfully and take to the next page i.e. search page. */
-    public SearchPage doDefaultLogin() {
-        if (!MerlinConstants.IS_SECURE) {
-            getUserTextBox().clear();
-            appendToUserName(UI_DEFAULT_USER);
-            tryLogin();
-        }
-        LOGGER.info("Search page should have opened.");
-        final SearchPage searchPage = PageFactory.initElements(driver, SearchPage.class);
-        searchPage.checkPage();
-        final PageHeader searchHeader = searchPage.getPageHeader();
-        if (!MerlinConstants.IS_SECURE) {
-            searchHeader.checkLoggedIn();
-            Assert.assertEquals(searchHeader.getLoggedInUser(), LoginPage.UI_DEFAULT_USER,
-                "Unexpected user is displayed");
-        }
-        return searchPage;
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(loginElem, "Login box");
-    }
-
-}
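
A minimal sketch of the login flow driven through the page object above; FirefoxDriver is only one possible WebDriver choice, and in the removed tests the driver came from the regression base classes rather than being created inline:

import org.apache.falcon.regression.ui.search.LoginPage;
import org.apache.falcon.regression.ui.search.SearchPage;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxDriver;

public final class LoginFlowSketch {
    public static void main(String[] args) {
        WebDriver driver = new FirefoxDriver();
        try {
            // Open the Falcon search UI, verify the login form is rendered, then log in as the default user.
            LoginPage loginPage = LoginPage.open(driver);
            loginPage.checkPage();
            SearchPage searchPage = loginPage.doDefaultLogin();
            searchPage.checkPage();
        } finally {
            driver.quit();
        }
    }
}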

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/MirrorWizardPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/MirrorWizardPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/MirrorWizardPage.java
deleted file mode 100644
index f990c92..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/MirrorWizardPage.java
+++ /dev/null
@@ -1,517 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.cli.FalconCLI;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.Entities.RecipeMerlin;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.joda.time.DateTime;
-import org.joda.time.format.DateTimeFormat;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/** Page object of the Mirror creation page. */
-public class MirrorWizardPage extends AbstractSearchPage {
-    private static final Logger LOGGER = Logger.getLogger(MirrorWizardPage.class);
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "formPage")
-    })
-    private WebElement mirrorBox;
-
-    public MirrorWizardPage(WebDriver driver) {
-        super(driver);
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(mirrorBox, "Mirror box");
-    }
-
-
-    public void setName(String name) {
-        clearAndSetByNgModel("UIModel.name", name);
-    }
-
-    public void setTags(List<String> tags) {
-        //TODO add code here
-    }
-
-    public void setMirrorType(FalconCLI.RecipeOperation recipeOperation) {
-        switch (recipeOperation) {
-        case HDFS_REPLICATION:
-            driver.findElement(By.xpath("//button[contains(.,'File System')]")).click();
-            break;
-        case HIVE_DISASTER_RECOVERY:
-            driver.findElement(By.xpath("//button[contains(.,'HIVE')]")).click();
-            break;
-        default:
-            break;
-        }
-    }
-
-
-    public void setHiveReplication(RecipeMerlin recipeMerlin) {
-        if (StringUtils.isNotEmpty(recipeMerlin.getSourceTable())) {
-            clickById("targetHIVETablesRadio");
-            clearAndSetByNgModel("UIModel.source.hiveDatabase", recipeMerlin.getSourceDb());
-            clearAndSetByNgModel("UIModel.source.hiveTables", recipeMerlin.getSourceTable());
-        } else {
-            clickById("targetHIVEDatabaseRadio");
-            clearAndSetByNgModel("UIModel.source.hiveDatabases", recipeMerlin.getSourceDb());
-        }
-    }
-
-
-    public void setStartTime(String validityStartStr) {
-        final DateTime startDate = TimeUtil.oozieDateToDate(validityStartStr);
-
-        clearAndSetByNgModel("UIModel.validity.start", DateTimeFormat.forPattern("MM/dd/yyyy").print(startDate));
-        final WebElement startTimeBox = driver.findElement(By.className("startTimeBox"));
-        final List<WebElement> startHourAndMinute = startTimeBox.findElements(By.tagName("input"));
-        final WebElement hourText = startHourAndMinute.get(0);
-        final WebElement minuteText = startHourAndMinute.get(1);
-        clearAndSet(hourText, DateTimeFormat.forPattern("hh").print(startDate));
-        clearAndSet(minuteText, DateTimeFormat.forPattern("mm").print(startDate));
-        final WebElement amPmButton = startTimeBox.findElement(By.tagName("button"));
-        if (!amPmButton.getText().equals(DateTimeFormat.forPattern("a").print(startDate))) {
-            amPmButton.click();
-        }
-    }
-
-    public void setEndTime(String validityEndStr) {
-        final DateTime validityEnd = TimeUtil.oozieDateToDate(validityEndStr);
-
-        clearAndSetByNgModel("UIModel.validity.end", DateTimeFormat.forPattern("MM/dd/yyyy").print(validityEnd));
-        final WebElement endTimeBox = driver.findElement(By.className("endTimeBox"));
-        final List<WebElement> endHourAndMinute = endTimeBox.findElements(By.tagName("input"));
-        final WebElement hourText = endHourAndMinute.get(0);
-        final WebElement minuteText = endHourAndMinute.get(1);
-        clearAndSet(hourText, DateTimeFormat.forPattern("hh").print(validityEnd));
-        clearAndSet(minuteText, DateTimeFormat.forPattern("mm").print(validityEnd));
-        final WebElement amPmButton = endTimeBox.findElement(By.tagName("button"));
-        if (!amPmButton.getText().equals(DateTimeFormat.forPattern("a").print(validityEnd))) {
-            amPmButton.click();
-        }
-    }
-
-    public void toggleAdvancedOptions() {
-        final WebElement advanceOption = driver.findElement(By.xpath("//h4[contains(.,'Advanced options')]"));
-        advanceOption.click();
-    }
-
-    public void setFrequency(Frequency frequency) {
-        clearAndSetByNgModel("UIModel.frequency.number", frequency.getFrequency());
-        selectNgModelByVisibleText("UIModel.frequency.unit", frequency.getTimeUnit().name().toLowerCase());
-    }
-
-    public void setHdfsDistCpMaxMaps(String distCpMaxMaps) {
-        clearAndSetByNgModel("UIModel.allocation.hdfs.maxMaps", distCpMaxMaps);
-    }
-
-
-    public void setHdfsMaxBandwidth(String maxBandwidth) {
-        clearAndSetByNgModel("UIModel.allocation.hdfs.maxBandwidth", maxBandwidth);
-    }
-
-    public void setHiveDistCpMaxMaps(String distCpMaxMaps) {
-        clearAndSetByNgModel("UIModel.allocation.hive.maxMapsDistcp", distCpMaxMaps);
-    }
-
-
-    public void setHiveReplicationMaxMaps(String replicationMaxMaps) {
-        clearAndSetByNgModel("UIModel.allocation.hive.maxMapsMirror", replicationMaxMaps);
-    }
-
-    public void setMaxEvents(String maxEvents) {
-        clearAndSetByNgModel("UIModel.allocation.hive.maxMapsEvents", maxEvents);
-    }
-
-    public void setHiveMaxBandwidth(String maxBandWidth) {
-        clearAndSetByNgModel("UIModel.allocation.hive.maxBandwidth", maxBandWidth);
-    }
-
-
-    public void setSourceInfo(ClusterMerlin srcCluster) {
-        setSourceStaging(srcCluster.getLocation("staging"));
-        setSourceHiveEndpoint(srcCluster.getInterfaceEndpoint(Interfacetype.REGISTRY));
-    }
-
-    public void setSourceHiveEndpoint(String hiveEndpoint) {
-        clearAndSetByNgModel("UIModel.hiveOptions.source.hiveServerToEndpoint", hiveEndpoint);
-    }
-
-    public void setSourceStaging(String stagingLocation) {
-        clearAndSetByNgModel("UIModel.hiveOptions.source.stagingPath", stagingLocation);
-    }
-
-    public void setTargetInfo(ClusterMerlin tgtCluster) {
-        setTargetStaging(tgtCluster.getLocation("staging"));
-        setTargetHiveEndpoint(tgtCluster.getInterfaceEndpoint(Interfacetype.REGISTRY));
-    }
-
-    public void setTargetHiveEndpoint(String hiveEndPoint) {
-        clearAndSetByNgModel("UIModel.hiveOptions.target.hiveServerToEndpoint", hiveEndPoint);
-    }
-
-    public void setTargetStaging(String hiveEndpoint) {
-        clearAndSetByNgModel("UIModel.hiveOptions.target.stagingPath", hiveEndpoint);
-    }
-
-    public void setRetry(Retry retry) {
-        selectNgModelByVisibleText("UIModel.retry.policy", retry.getPolicy().toString().toUpperCase());
-        clearAndSetByNgModel("UIModel.retry.delay.number", retry.getDelay().getFrequency());
-        selectNgModelByVisibleText("UIModel.retry.delay.unit", retry.getDelay().getTimeUnit().name().toLowerCase());
-        clearAndSetByNgModel("UIModel.retry.attempts", String.valueOf(retry.getAttempts()));
-    }
-
-
-    public void setAcl(ACL acl) {
-        setAclOwner(acl.getOwner());
-        setAclGroup(acl.getGroup());
-        setAclPermission(acl.getPermission());
-    }
-
-    public void setAclOwner(String aclOwner) {
-        clearAndSetSlowlyByNgModel("UIModel.acl.owner", aclOwner);
-    }
-
-    public boolean isAclOwnerWarningDisplayed() {
-        final WebElement warning =
-            findElementByNgModel("UIModel.acl.owner").findElement(By.xpath("./following-sibling::*"));
-        waitForAngularToFinish();
-        return warning.isDisplayed();
-    }
-
-    public void setAclGroup(String aclGroup) {
-        clearAndSetSlowlyByNgModel("UIModel.acl.group", aclGroup);
-    }
-
-    public boolean isAclGroupWarningDisplayed() {
-        final WebElement warning =
-            findElementByNgModel("UIModel.acl.group").findElement(By.xpath("./following-sibling::*"));
-        waitForAngularToFinish();
-        return warning.isDisplayed();
-    }
-
-    public void setAclPermission(String aclPermission) {
-        clearAndSetSlowlyByNgModel("UIModel.acl.permissions", aclPermission);
-    }
-
-    public boolean isAclPermissionWarningDisplayed() {
-        final WebElement warning =
-            findElementByNgModel("UIModel.acl.permissions").findElement(By.xpath("./following-sibling::*"));
-        waitForAngularToFinish();
-        return warning.isDisplayed();
-    }
-
-    public void next() {
-        final WebElement nextButton = driver.findElement(By.xpath("//button[contains(.,'Next')]"));
-        nextButton.click();
-    }
-
-    public void previous() {
-        final WebElement prevButton = driver.findElement(By.xpath("//button[contains(.,'Previous')]"));
-        prevButton.click();
-    }
-
-    public void silentPrevious() {
-        try {
-            previous();
-        } catch (Exception ignore) {
-            //ignore
-        }
-    }
-
-    public void cancel() {
-        driver.findElement(By.xpath("//a[contains(.,'Cancel')]"));
-    }
-
-    public void save() {
-        final WebElement saveButton = driver.findElement(By.xpath("//button[contains(.,'Save')]"));
-        UIAssert.assertDisplayed(saveButton, "Save button");
-        saveButton.click();
-        waitForAlert();
-    }
-
-    public ClusterBlock getSourceBlock() {
-        return new ClusterBlock("Source");
-    }
-
-    public ClusterBlock getTargetBlock() {
-        return new ClusterBlock("Target");
-    }
-
-    /**
-     * Populates the mirror wizard UI with parameters from the given recipe.
-     * @param recipe recipe to apply
-     * @param overwriteDefaults whether to overwrite the HiveDR default values that the UI picks up automatically
-     */
-    public void applyRecipe(RecipeMerlin recipe, boolean overwriteDefaults) {
-        final ClusterMerlin srcCluster = recipe.getSrcCluster();
-        final ClusterMerlin tgtCluster = recipe.getTgtCluster();
-        setName(recipe.getName());
-        setTags(recipe.getTags());
-        setMirrorType(recipe.getRecipeOperation());
-        getSourceBlock().selectCluster(srcCluster.getName());
-        getTargetBlock().selectCluster(tgtCluster.getName());
-        getSourceBlock().selectRunHere();
-        setStartTime(recipe.getValidityStart());
-        setEndTime(recipe.getValidityEnd());
-        toggleAdvancedOptions();
-        switch (recipe.getRecipeOperation()) {
-        case HDFS_REPLICATION:
-            getSourceBlock().setPath(recipe.getSourceDir());
-            getTargetBlock().setPath(recipe.getTargetDir());
-            setHdfsDistCpMaxMaps(recipe.getDistCpMaxMaps());
-            setHdfsMaxBandwidth(recipe.getMapBandwidth());
-            break;
-        case HIVE_DISASTER_RECOVERY:
-            setHiveReplication(recipe);
-            setHiveDistCpMaxMaps(recipe.getDistCpMaxMaps());
-            setHiveReplicationMaxMaps(recipe.getReplicationMaxMaps());
-            setMaxEvents(recipe.getMaxEvents());
-            setHiveMaxBandwidth(recipe.getMapBandwidth());
-            if (overwriteDefaults) {
-                setSourceInfo(recipe.getSrcCluster());
-                setTargetInfo(recipe.getTgtCluster());
-            }
-            break;
-        default:
-            break;
-        }
-        setFrequency(recipe.getFrequency());
-        setRetry(recipe.getRetry());
-        setAcl(recipe.getAcl());
-    }
-
-    public int getStepNumber() {
-        try {
-            driver.findElement(By.xpath("//button[contains(.,'Previous')]"));
-            return 2;
-        } catch (Exception ignore) {
-            //ignore
-        }
-        return 1;
-    }
-
-    public Map<Summary, String> getSummaryProperties() {
-        String formText = driver.findElement(By.id("formSummaryBox")).getText();
-        Map<Summary, String> props = new EnumMap<>(Summary.class);
-        props.put(Summary.NAME, getBetween(formText, "Name", "Type"));
-        props.put(Summary.TYPE, getBetween(formText, "Type", "Tags"));
-        props.put(Summary.TAGS, getBetween(formText, "Tags", "Source"));
-        props.put(Summary.RUN_ON, getBetween(formText, "Run On", "Schedule"));
-        props.put(Summary.START, getBetween(formText, "Start on:", "End on:"));
-        props.put(Summary.END, getBetween(formText, "End on:", "Max Maps"));
-        props.put(Summary.MAX_MAPS, getBetween(formText, "Max Maps", "Max Bandwidth"));
-        props.put(Summary.MAX_BANDWIDTH, getBetween(formText, "Max Bandwidth", "ACL"));
-
-        props.put(Summary.ACL_OWNER, getBetween(formText, "Owner:", "Group:"));
-        props.put(Summary.ACL_GROUP, getBetween(formText, "Group:", "Permissions:"));
-        props.put(Summary.ACL_PERMISSIONS, getBetween(formText, "Permissions:", "Retry"));
-
-        props.put(Summary.RETRY_POLICY, getBetween(formText, "Policy:", "delay:"));
-        props.put(Summary.RETRY_DELAY, getBetween(formText, "delay:", "Attempts:"));
-        props.put(Summary.RETRY_ATTEMPTS, getBetween(formText, "Attempts:", "Frequency"));
-
-        props.put(Summary.FREQUENCY, getBetween(formText, "Frequency", "Previous"));
-
-        String source = getBetween(formText, "Source", "Target");
-        String target = getBetween(formText, "Target", "Run On");
-        if ("HDFS".equals(props.get(Summary.TYPE))) {
-            props.put(Summary.SOURCE_LOCATION, getBetween(source, "Location", "Path"));
-            props.put(Summary.TARGET_LOCATION, getBetween(target, "Location", "Path"));
-            if ("HDFS".equals(props.get(Summary.SOURCE_LOCATION))) {
-                props.put(Summary.SOURCE_CLUSTER, getBetween(source, "^", "Location"));
-                props.put(Summary.SOURCE_PATH, getBetween(source, "Path:", "$"));
-
-            } else {
-                props.put(Summary.SOURCE_PATH, getBetween(source, "Path:", "URL"));
-                props.put(Summary.SOURCE_URL, getBetween(source, "URL:", "$"));
-
-            }
-            if ("HDFS".equals(props.get(Summary.TARGET_LOCATION))) {
-                props.put(Summary.TARGET_CLUSTER, getBetween(target, "^", "Location"));
-                props.put(Summary.TARGET_PATH, getBetween(target, "Path:", "$"));
-
-            } else {
-                props.put(Summary.TARGET_PATH, getBetween(target, "Path:", "URL"));
-                props.put(Summary.TARGET_URL, getBetween(target, "URL:", "$"));
-
-            }
-
-        } else {
-            LOGGER.error("TODO Read info for HIVE replication.");
-        }
-
-
-        return props;
-    }
-
-    /** Parts of the mirror summary. */
-    public enum Summary {
-        NAME,
-        TYPE,
-        TAGS,
-        RUN_ON,
-        START,
-        END,
-        MAX_MAPS,
-        MAX_BANDWIDTH,
-        ACL_OWNER,
-        ACL_GROUP,
-        ACL_PERMISSIONS,
-        RETRY_POLICY,
-        RETRY_DELAY,
-        RETRY_ATTEMPTS,
-        FREQUENCY,
-        SOURCE_LOCATION,
-        SOURCE_PATH,
-        SOURCE_CLUSTER,
-        SOURCE_URL,
-        TARGET_LOCATION,
-        TARGET_PATH,
-        TARGET_CLUSTER,
-        TARGET_URL,
-
-    }
-
-    private static String getBetween(String text, String first, String second) {
-        Pattern pattern = Pattern.compile(".*" + first + "(.+)" + second + ".*", Pattern.DOTALL);
-        Matcher matcher = pattern.matcher(text);
-        if (matcher.find()) {
-            return matcher.group(1).trim();
-        } else {
-            return null;
-        }
-    }
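-
-    // Illustrative only: given formText "Name my-mirror Type HDFS", getBetween(formText, "Name", "Type")
-    // returns "my-mirror"; if either marker is missing it returns null.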
-
-    /**
-     * Block of source or target cluster with parameters.
-     */
-    public final class ClusterBlock {
-        private final WebElement mainBlock;
-        private final WebElement runHereButton;
-        private final String blockType;
-
-        private ClusterBlock(String type) {
-            this.blockType = type;
-            mainBlock = driver.findElement(By.xpath("//h3[contains(.,'" + type + "')]/.."));
-            runHereButton = mainBlock.findElement(By.id("runJobOn" + type + "Radio"));
-        }
-
-        public Set<Location> getAvailableLocationTypes() {
-            List<WebElement> inputs = getLocationBox().findElements(By.xpath(".//input"));
-            Set<Location> result = EnumSet.noneOf(Location.class);
-            for (WebElement input : inputs) {
-                result.add(Location.getByInput(input));
-            }
-            return result;
-        }
-
-        public Location getSelectedLocationType() {
-            WebElement selected = getLocationBox()
-                .findElement(By.xpath("//input[contains(@class,'ng-valid-parse')]"));
-            return Location.getByInput(selected);
-        }
-
-        public void setLocationType(Location type) {
-            getLocationBox().findElement(By.xpath(
-                String.format(".//input[translate(@value,'azures','AZURES')='%s']", type.toString()))).click();
-        }
-
-        public void selectRunHere() {
-            runHereButton.click();
-        }
-
-        public Set<String> getAvailableClusters() {
-            List<WebElement> options = mainBlock.findElements(By.xpath(".//option[not(@disabled)]"));
-            Set<String> clusters = new TreeSet<>();
-            for (WebElement option : options) {
-                clusters.add(option.getText());
-            }
-            return clusters;
-        }
-
-        public void selectCluster(String clusterName) {
-            selectNgModelByVisibleText("UIModel." + blockType.toLowerCase() + ".cluster", clusterName);
-        }
-
-        public void setPath(String path) {
-            final WebElement srcPathElement = getPath();
-            clearAndSet(srcPathElement, path);
-        }
-
-        public boolean isRunHereSelected() {
-            return runHereButton.getAttribute("class").contains("ng-valid-parse");
-        }
-
-        public boolean isRunHereAvailable() {
-            return runHereButton.getAttribute("disabled") == null;
-        }
-
-
-        private WebElement getLocationBox() {
-            return mainBlock.findElement(By.className("locationBox"));
-        }
-
-        private WebElement getPath() {
-            return mainBlock.findElement(By.name(blockType.toLowerCase() + "ClusterPathInput"));
-        }
-
-
-
-    }
-
-    /**
-     * Types of source/target location.
-     */
-    public enum Location {
-        HDFS,
-        AZURE,
-        S3;
-
-        private static Location getByInput(WebElement input) {
-            return Location.valueOf(input.getAttribute("value").trim().toUpperCase());
-        }
-
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/PageHeader.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/PageHeader.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/PageHeader.java
deleted file mode 100644
index 61d9475..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/PageHeader.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.JavascriptExecutor;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.PageFactory;
-import org.openqa.selenium.support.ui.ExpectedConditions;
-import org.openqa.selenium.support.ui.WebDriverWait;
-import org.testng.Assert;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Page object for header of the search ui pages. */
-public class PageHeader {
-    private static final Logger LOGGER = Logger.getLogger(PageHeader.class);
-
-    protected WebDriver driver;
-
-    public PageHeader(WebDriver driver) {
-        this.driver = driver;
-    }
-
-    @FindBy(className = "navbar")
-    private WebElement header;
-
-    @FindBys({
-        @FindBy(className = "navbar"),
-        @FindBy(className = "logoTitle")
-    })
-    private WebElement homeButton;
-
-    @FindBys({
-        @FindBy(className = "navbar"),
-        @FindBy(className = "logoTitle"),
-        @FindBy(className = "falconLogo")
-    })
-    private WebElement falconLogo;
-
-    @FindBys({
-        @FindBy(className = "navbar"),
-        @FindBy(className = "loginHeaderBox")
-    })
-    private WebElement loginHeaderBox;
-
-    @FindBys({
-        @FindBy(className = "navbar"),
-        @FindBy(className = "createNavWrapper")
-    })
-    private WebElement createEntityBox;
-
-    @FindBy(id = "cluster.create")
-    private WebElement clusterCreateButton;
-
-    @FindBy(id = "feed.create")
-    private WebElement feedCreateButton;
-
-    @FindBy(id = "process.create")
-    private WebElement processCreateButton;
-
-    @FindBy(id = "dataset.create")
-    private WebElement mirrorCreateButton;
-
-    @FindBys({
-        @FindBy(className = "uploadNavWrapper"),
-    })
-    private WebElement uploadEntityBox;
-
-    @FindBys({
-        @FindBy(className = "uploadNavWrapper"),
-        @FindBy(className = "btn-file")
-    })
-    private WebElement uploadEntityButton;
-
-
-    public WebElement getHomeButton() {
-        return homeButton;
-    }
-
-    public void checkLoggedIn() {
-        Assert.assertEquals(getLogoutButton().getText(), "Logout",
-            "Unexpected text on logout button");
-    }
-
-    public SearchPage gotoHome() {
-        homeButton.click();
-        final SearchPage searchPage = PageFactory.initElements(driver, SearchPage.class);
-        searchPage.checkPage();
-        final PageHeader searchHeader = searchPage.getPageHeader();
-        if (!MerlinConstants.IS_SECURE) {
-            searchHeader.checkLoggedIn();
-            Assert.assertEquals(searchHeader.getLoggedInUser(), LoginPage.UI_DEFAULT_USER,
-                "Unexpected user is displayed");
-        }
-        return searchPage;
-    }
-
-    public void checkLoggedOut() {
-        UIAssert.assertNotDisplayed(getLogoutButton(), "logout button");
-    }
-
-    /**
-     * Check header and make sure all the buttons/links are working correctly. Handles both
-     * logged in and logged out scenarios.
-     */
-    public void checkHeader() {
-        //home button is always displayed
-        UIAssert.assertDisplayed(homeButton, "falcon logo");
-        Assert.assertEquals(homeButton.getText(), "Falcon", "Unexpected home button text");
-        UIAssert.assertDisplayed(falconLogo, "falcon logo");
-        final WebElement helpLink = loginHeaderBox.findElement(By.tagName("a"));
-        UIAssert.assertDisplayed(helpLink, "help link");
-
-        final String oldUrl = driver.getCurrentUrl();
-        //displayed if user is logged in: create entity buttons, upload entity button, username
-        if (MerlinConstants.IS_SECURE || getLogoutButton().isDisplayed()) {
-            //checking create entity box
-            UIAssert.assertDisplayed(createEntityBox, "Create entity box");
-            final WebElement createEntityLabel = createEntityBox.findElement(By.tagName("h4"));
-            Assert.assertEquals(createEntityLabel.getText(), "Create an entity",
-                "Unexpected create entity text");
-            //checking upload entity part
-            UIAssert.assertDisplayed(uploadEntityBox, "Upload entity box");
-            final WebElement uploadEntityLabel = uploadEntityBox.findElement(By.tagName("h4"));
-            Assert.assertEquals(uploadEntityLabel.getText(), "Upload an entity",
-                "Unexpected upload entity text");
-            Assert.assertEquals(uploadEntityButton.getText(), "Browse for the XML file",
-                "Unexpected text on upload entity button");
-            //checking if logged-in username is displayed
-            if (!MerlinConstants.IS_SECURE) {
-                UIAssert.assertDisplayed(getLogoutButton(), "Logout button");
-                AssertUtil.assertNotEmpty(getLoggedInUser(), "Expecting logged-in username.");
-            }
-
-            //create button navigation
-            doCreateCluster();
-            driver.get(oldUrl);
-            doCreateFeed();
-            driver.get(oldUrl);
-            doCreateProcess();
-            driver.get(oldUrl);
-            doCreateMirror();
-            driver.get(oldUrl);
-        }
-        //home button navigation
-        homeButton.click();
-        Assert.assertTrue(getHomeUrls().contains(driver.getCurrentUrl()),
-            "home button navigate to: " + driver.getCurrentUrl() + " instead of: " + getHomeUrls());
-        driver.get(oldUrl);
-
-        //help link navigation
-        Assert.assertEquals(helpLink.getText(), "Help", "Help link expected to have text 'Help'");
-        clickLink(helpLink);
-        int helpPageloadTimeoutThreshold = 30;
-        new WebDriverWait(driver, helpPageloadTimeoutThreshold).until(
-            ExpectedConditions.stalenessOf(helpLink));
-        Assert.assertEquals(driver.getCurrentUrl(), MerlinConstants.HELP_URL,
-            "Unexpected help url");
-        driver.get(oldUrl);
-    }
-
-    /**
-     * Useful in cases when Selenium fails to click a link due to its bugs.
-     */
-    private void clickLink(WebElement link) {
-        JavascriptExecutor executor = (JavascriptExecutor) driver;
-        executor.executeScript("arguments[0].click();", link);
-    }
-
-    public void uploadXml(String filePath) throws IOException {
-        final WebElement uploadEntityTextBox = uploadEntityBox.findElement(By.id("files"));
-        uploadEntityTextBox.sendKeys(filePath);
-        //wait for alert
-        driver.findElements(
-            By.xpath("//div[@class='messages notifs' and contains(@style,'opacity') and not(contains(@style,'1;'))]"));
-    }
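-
-    // A minimal usage sketch (assumptions: "searchPage" is an already-open SearchPage and the XML path
-    // below is hypothetical):
-    //
-    //     searchPage.getPageHeader().uploadXml("/tmp/cluster-template.xml");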
-
-    public ClusterWizardPage doCreateCluster() {
-        UIAssert.assertDisplayed(clusterCreateButton, "Cluster create button");
-        Assert.assertEquals(clusterCreateButton.getText(), "Cluster",
-            "Unexpected text on create cluster button");
-        clusterCreateButton.click();
-        final ClusterWizardPage clusterPage = PageFactory.initElements(driver, ClusterWizardPage.class);
-        clusterPage.checkPage();
-        return clusterPage;
-    }
-
-    public FeedWizardPage doCreateFeed() {
-        UIAssert.assertDisplayed(feedCreateButton, "Feed create button");
-        Assert.assertEquals(feedCreateButton.getText(), "Feed",
-            "Unexpected text on create feed button");
-        feedCreateButton.click();
-        final FeedWizardPage feedPage = PageFactory.initElements(driver, FeedWizardPage.class);
-        feedPage.checkPage();
-        return feedPage;
-    }
-
-    public ProcessWizardPage doCreateProcess() {
-        UIAssert.assertDisplayed(processCreateButton, "Process create button");
-        Assert.assertEquals(processCreateButton.getText(), "Process",
-            "Unexpected text on create process button");
-        processCreateButton.click();
-        final ProcessWizardPage processPage = PageFactory.initElements(driver, ProcessWizardPage.class);
-        processPage.checkPage();
-        return processPage;
-    }
-
-    public MirrorWizardPage doCreateMirror() {
-        UIAssert.assertDisplayed(mirrorCreateButton, "Mirror create button");
-        Assert.assertEquals(mirrorCreateButton.getText(), "Mirror",
-            "Unexpected text on create mirror button");
-        mirrorCreateButton.click();
-        final MirrorWizardPage mirrorPage = PageFactory.initElements(driver, MirrorWizardPage.class);
-        mirrorPage.checkPage();
-        return mirrorPage;
-    }
-
-    private List<String> getHomeUrls() {
-        List<String> urls = new ArrayList<>();
-        String homeUrl = MerlinConstants.PRISM_URL;
-        urls.add(homeUrl);
-        urls.add(homeUrl.replaceAll("/$", "") + "/#/");
-        return urls;
-    }
-
-    public String getLoggedInUser() {
-        return loginHeaderBox.findElement(By.tagName("div")).getText();
-    }
-
-    private WebElement getLogoutButton() {
-        return loginHeaderBox.findElements(By.xpath("button[@ng-click='logOut()']")).get(0);
-    }
-
-    private WebElement getNotificationButton() {
-        return loginHeaderBox.findElements(By.xpath("button[@ng-click='notify()']")).get(0);
-    }
-
-    /**
-     * Validates the number of notifications in the notification bar and the text of the last notification.
-     */
-    public void validateNotificationCountAndCheckLast(int count, String message) {
-        WebElement notificationButton = getNotificationButton();
-        notificationButton.click();
-        waitForAngularToFinish();
-
-        // Test notifications dropdown visibility
-        WebElement notificationDropdown = notificationButton.findElement(By.className("messages"));
-        Assert.assertTrue(notificationDropdown.getAttribute("style").contains("display: block;"),
-            "Notifications are not visible.");
-
-        // Test validity of number of notifications
-        List<WebElement> notifications = notificationDropdown.findElements(By.xpath("div"));
-        Assert.assertEquals(notifications.size() - 1, count, "Invalid notification count.");
-
-        // Test validity of last notification
-        String lastNotification = notifications.get(0).getText();
-        Assert.assertTrue(lastNotification.contains(message), "Invalid last notification text.");
-    }
-
-    public LoginPage doLogout() {
-        LOGGER.info("Going to logout.");
-        getLogoutButton().click();
-        final LoginPage loginPage = PageFactory.initElements(driver, LoginPage.class);
-        loginPage.checkPage();
-        return loginPage;
-    }
-
-    protected void waitForAngularToFinish() {
-        final String javaScript = "return (window.angular != null) && "
-            + "(angular.element(document).injector() != null) && "
-            + "(angular.element(document).injector().get('$http').pendingRequests.length === 0)";
-        boolean isLoaded = false;
-        for (int i = 0; i < AbstractSearchPage.PAGELOAD_TIMEOUT_THRESHOLD && !isLoaded; i++) {
-            final Object output = ((JavascriptExecutor) driver).executeScript(javaScript);
-            isLoaded = Boolean.valueOf(output.toString());
-            LOGGER.info((i + 1) + ". waiting on angular to finish.");
-            TimeUtil.sleepSeconds(1);
-        }
-        LOGGER.info("angular is done continuing...");
-    }
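-
-    // Note: the script above evaluates to true once Angular's $http service has no pending requests,
-    // so the loop polls roughly once per second for at most PAGELOAD_TIMEOUT_THRESHOLD iterations.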
-
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ProcessWizardPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ProcessWizardPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ProcessWizardPage.java
deleted file mode 100644
index 8fcc5b7..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ProcessWizardPage.java
+++ /dev/null
@@ -1,960 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import com.google.common.collect.Lists;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Clusters;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.apache.falcon.entity.v0.process.ExecutionType;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.entity.v0.process.Validity;
-import org.apache.falcon.entity.v0.process.Workflow;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.ui.Select;
-import org.testng.Assert;
-
-import java.text.SimpleDateFormat;
-import java.util.List;
-import java.util.TimeZone;
-
-/** Page object of the Process creation page. */
-public class ProcessWizardPage extends EntityWizardPage {
-
-    private static final Logger LOGGER = Logger.getLogger(ProcessWizardPage.class);
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "entityForm")
-    })
-    private WebElement processBox;
-
-    @FindBy(xpath = "//form[@name='processForm']/div[1]")
-    private WebElement summaryBox;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "entityForm"),
-        @FindBy(className = "nextBtn")
-    })
-    private WebElement nextButton;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "entityForm"),
-        @FindBy(className = "prevBtn")
-    })
-    private WebElement previousButton;
-
-    @FindBy(xpath = "//a[contains(.,'Cancel')]")
-    private WebElement cancelButton;
-
-    @FindBy(xpath = "//fieldset[@id='fieldWrapper']")
-    private WebElement formBox;
-
-    public ProcessWizardPage(WebDriver driver) {
-        super(driver);
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(processBox, "Process box");
-    }
-
-    /**
-     * Completes step 1 and clicks next.
-     */
-    public void goToPropertiesStep(ProcessMerlin process) {
-        setProcessGeneralInfo(process);
-        clickNext();
-
-    }
-
-    public void goToClustersStep(ProcessMerlin process) {
-        goToPropertiesStep(process);
-
-        setProcessPropertiesInfo(process);
-        clickNext();
-    }
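-
-    // A minimal end-to-end usage sketch (assumptions: "process" is a fully populated ProcessMerlin and
-    // "searchPage" is an already-open SearchPage whose header exposes doCreateProcess()):
-    //
-    //     ProcessWizardPage processPage = searchPage.getPageHeader().doCreateProcess();
-    //     processPage.goToClustersStep(process);       // steps 1 & 2: general info + properties
-    //     processPage.setProcessClustersInfo(process); // step 3: clusters
-    //     processPage.clickNext();
-    //     processPage.setInputOutputInfo(process);     // step 4: inputs & outputs
-    //     processPage.clickNext();
-    //     processPage.clickSave();                     // step 5: summary, save the process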
-
-    public void clickNext() {
-        nextButton.click();
-    }
-
-    public void clickPrevious(){
-        previousButton.click();
-    }
-
-    public void clickCancel(){
-        cancelButton.click();
-    }
-
-    /*----- Step1 General info ----*/
-
-    private WebElement getName() {
-        return driver.findElement(By.id("entityNameField"));
-    }
-
-    public void setName(String name) {
-        final WebElement nameElement = getName();
-        nameElement.clear();
-        for (String s : name.split("")) {
-            nameElement.sendKeys(s);
-        }
-    }
-    private WebElement getTagsSection() {
-        return driver.findElement(By.id("tagsSection"));
-    }
-
-    private WebElement getAddTagButton() {
-        return driver.findElement(By.className("formViewContainer"))
-            .findElement(By.xpath("./form/div[4]/button"));
-    }
-
-    private List<WebElement> getDeleteTagButtons() {
-        return getTagsSection().findElements(By.tagName("button"));
-    }
-
-    private List<WebElement> getTagTextFields() {
-        return getTagsSection().findElements(By.tagName("input"));
-    }
-
-    public void deleteTags() {
-        //delete all tags
-        final List<WebElement> deleteTagButtons = getDeleteTagButtons();
-        for (WebElement deleteTagButton : Lists.reverse(deleteTagButtons)) {
-            deleteTagButton.click();
-        }
-        for (WebElement textField : getTagTextFields()) {
-            textField.clear();
-        }
-    }
-
-    private WebElement getTagKey(int index) {
-        return processBox.findElements(By.xpath("//input[@ng-model='tag.key']")).get(index);
-    }
-    private WebElement getTagValue(int index) {
-        return processBox.findElements(By.xpath("//input[@ng-model='tag.value']")).get(index);
-    }
-
-    public void setTagKey(int index, String tagKey){
-        getTagKey(index).sendKeys(tagKey);
-    }
-    public void setTagValue(int index, String tagValue){
-        getTagValue(index).sendKeys(tagValue);
-    }
-
-    public void setTags(String tagsStr){
-        if (StringUtils.isEmpty(tagsStr)){
-            return;
-        }
-        String[] tags = tagsStr.split(",");
-        for (int i = 0; i < tags.length; i++){
-            String[] keyValue = tags[i].split("=");
-            setTagKey(i, keyValue[0]);
-            setTagValue(i, keyValue[1]);
-            if (tags.length > i + 1){
-                getAddTagButton().click();
-            }
-        }
-    }
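-
-    // Illustrative only: setTags("dept=finance,priority=high") fills two key/value tag rows, clicking
-    // the "add tag" button between entries; a null or empty string leaves the tag section untouched.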
-
-    public String getTagKeyText(int index){
-        return getTagKey(index).getAttribute("value");
-    }
-
-    public String getTagValueText(int index){
-        return getTagValue(index).getAttribute("value");
-    }
-
-    public boolean isPigRadioSelected(){
-        return getPigRadio().isSelected();
-    }
-
-    public String getEngineVersionText(){
-        return getEngineVersion().getFirstSelectedOption().getAttribute("value");
-    }
-
-    private WebElement getWfName() {
-        return driver.findElement(By.id("workflowNameField"));
-    }
-
-    private WebElement getOozieRadio() {
-        return driver.findElement(By.id("oozieEngineRadio"));
-    }
-
-    private WebElement getPigRadio() {
-        return driver.findElement(By.id("pigEngineRadio"));
-    }
-
-    private WebElement getHiveRadio() {
-        return driver.findElement(By.id("hiveEngineRadio"));
-    }
-
-    private Select getEngineVersion() {
-        return new Select(driver.findElement(By.id("engineVersionField")));
-    }
-
-    private WebElement getPath() {
-        return driver.findElement(By.id("pathField"));
-    }
-
-    public void setWorkflow(Workflow processWf) {
-        final WebElement wfName = getWfName();
-        wfName.clear();
-        wfName.sendKeys(processWf.getName());
-        switch (processWf.getEngine()) {
-        case OOZIE:
-            getOozieRadio().click();
-            break;
-        case PIG:
-            getPigRadio().click();
-            break;
-        case HIVE:
-            getHiveRadio().click();
-            break;
-        default:
-            Assert.fail("Unexpected workflow engine: " + processWf.getEngine());
-        }
-        final String version = processWf.getVersion();
-        // The getVersion() method returns '1.0' if it's null, hence the hack below
-        if (StringUtils.isNotEmpty(version) && !version.equals("1.0")) {
-            getEngineVersion().selectByVisibleText(version);
-        }
-        final WebElement path = getPath();
-        path.clear();
-        path.sendKeys(processWf.getPath());
-    }
-
-    private WebElement getAclOwner() {
-        return driver.findElement(By.name("aclOwnerInput"));
-    }
-
-    private WebElement getAclGroup() {
-        return driver.findElement(By.name("aclGroupInput"));
-    }
-
-    private WebElement getAclPerm() {
-        return driver.findElement(By.name("aclPermissionsInput"));
-    }
-
-    public void setAcl(ACL acl) {
-        final WebElement aclOwner = getAclOwner();
-        aclOwner.clear();
-        aclOwner.sendKeys(acl.getOwner());
-        final WebElement aclGroup = getAclGroup();
-        aclGroup.clear();
-        aclGroup.sendKeys(acl.getGroup());
-        final WebElement aclPerm = getAclPerm();
-        aclPerm.clear();
-        aclPerm.sendKeys(acl.getPermission());
-    }
-
-    public void setProcessGeneralInfo(ProcessMerlin process) {
-        setName(process.getName());
-        final String tags = StringUtils.trimToEmpty(process.getTags());
-        setTags(tags);
-        setWorkflow(process.getWorkflow());
-        setAcl(process.getACL());
-    }
-
-    public void isFrequencyQuantityDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getFrequencyQuantity(), "Frequency Quantity");
-        }else {
-            try{
-                getFrequencyQuantity();
-                Assert.fail("Frequency Quantity found");
-            } catch (Exception ex){
-                LOGGER.info("Frequency Quantity not found");
-            }
-        }
-    }
-
-    public void isValidityStartDateDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getStartDate(), "Cluster Validity Start Date");
-        }else {
-            try{
-                getStartDate();
-                Assert.fail("Cluster Validity Start Date found");
-            } catch (Exception ex){
-                LOGGER.info("Cluster Validity Start Date not found");
-            }
-        }
-    }
-
-    public void isAddInputButtonDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getAddInputButton(), "Add Input button.");
-        }else {
-            try{
-                getAddInputButton();
-                Assert.fail("Add Input Button found");
-            } catch (Exception ex){
-                LOGGER.info("Add Input Button not found");
-            }
-        }
-    }
-
-    public void isSaveButtonDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getSaveProcessButton(), "Save Button");
-        }else {
-            try{
-                getSaveProcessButton();
-                Assert.fail("Save Process Button found");
-            } catch (Exception ex){
-                LOGGER.info("Save Process Button not found");
-            }
-        }
-    }
-
-    private WebElement getSaveProcessButton(){
-        return formBox.findElement(By.xpath("//button[contains(.,'Save')]"));
-    }
-
-    public void isTagsDisplayed(int index, boolean isDisplayed){
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getTagKey(index), "Tag Key Index - " + index);
-            UIAssert.assertDisplayed(getTagValue(index), "Tag Value Index - " + index);
-        }else{
-            try{
-                getTagKey(index);
-                Assert.fail("Tag Key Index - " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Tag Key Index - " + index + " not found");
-            }
-            try{
-                getTagValue(index);
-                Assert.fail("Tag Key Value - " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Tag Key Value - " + index + " not found");
-            }
-        }
-    }
-
-    /*----- Step2 Properties ----*/
-
-    private Select getTimezone() {
-        return new Select(formBox.findElement(By.xpath("//select[contains(@class, 'TZSelect')]")));
-    }
-
-    public void setTimezone(TimeZone timezone) {
-        if (timezone == null) {
-            return;
-        }
-        String timeZone = timezone.getID();
-        getTimezone().selectByValue(timeZone);
-    }
-
-    private WebElement getFrequencyQuantity() {
-        return processBox.findElement(By.xpath("//input[@ng-model='process.frequency.quantity']"));
-    }
-    private Select getFrequencyUnit() {
-        return new Select(processBox.findElement(By.xpath(
-            "//select[@ng-model='process.frequency.unit']")));
-    }
-
-    public String getFrequencyQuantityText(){
-        return getFrequencyQuantity().getAttribute("value");
-    }
-
-    public String getMaxParallelInstancesText(){
-        return getMaxParallelInstances().getFirstSelectedOption().getAttribute("value");
-    }
-
-    public String getTimezoneText(){
-        return getTimezone().getFirstSelectedOption().getAttribute("value");
-    }
-
-    public String getOrderText(){
-        return getOrder().getFirstSelectedOption().getAttribute("value");
-    }
-
-    public void setFrequencyQuantity(String frequencyQuantity){
-        getFrequencyQuantity().sendKeys(frequencyQuantity);
-    }
-    public void setFrequencyUnit(String frequencyUnit){
-        getFrequencyUnit().selectByVisibleText(frequencyUnit);
-    }
-
-    public List<String> getTimezoneValues(){
-        return getDropdownValues(getTimezone());
-    }
-
-    public List<String> getFrequencyUnitValues(){
-        return getDropdownValues(getFrequencyUnit());
-    }
-
-    public List<String> getMaxParallelInstancesValues(){
-        return getDropdownValues(getMaxParallelInstances());
-    }
-
-    public List<String> getOrderValues(){
-        return getDropdownValues(getOrder());
-    }
-
-    public List<String> getRetryPolicyValues(){
-        return getDropdownValues(getRetryPolicy());
-    }
-
-    public List<String> getRetryDelayUnitValues(){
-        return getDropdownValues(getRetryDelayUnit());
-    }
-
-    private Select getMaxParallelInstances(){
-        return new Select(formBox.findElement(By.xpath("//select[@ng-model='process.parallel']")));
-    }
-
-    public void setMaxParallelInstances(int quantity) {
-        getMaxParallelInstances().selectByValue(String.valueOf(quantity));
-    }
-
-    private Select getOrder(){
-        return new Select(formBox.findElement(By.xpath("//select[@ng-model='process.order']")));
-    }
-
-    public void setOrder(ExecutionType order) {
-        getOrder().selectByValue(order.value());
-    }
-
-    private Select getRetryPolicy(){
-        return new Select(formBox.findElement(By.xpath("//select[@ng-model='process.retry.policy']")));
-    }
-
-    private Select getRetryDelayUnit(){
-        return new Select(formBox.findElement(By.xpath("//select[@ng-model='process.retry.delay.unit']")));
-    }
-
-    private WebElement getAttempts(){
-        return formBox.findElement(By.id("attemptsField"));
-    }
-
-    private WebElement getDelayQuantity(){
-        return formBox.findElement(By.id("delayQuantity"));
-    }
-
-    public void setRetry(Retry retry) {
-        getRetryPolicy().selectByValue(retry.getPolicy().value());
-        getAttempts().sendKeys(String.valueOf(retry.getAttempts()));
-        getDelayQuantity().sendKeys(retry.getDelay().getFrequency());
-        getRetryDelayUnit().selectByValue(retry.getDelay().getTimeUnit().name());
-    }
-
-    /**
-     * Enters process properties on page 2 of the process setup wizard.
-     */
-    public void setProcessPropertiesInfo(ProcessMerlin process) {
-        setTimezone(process.getTimezone());
-        setFrequencyQuantity(process.getFrequency().getFrequency());
-        setFrequencyUnit(process.getFrequency().getTimeUnit().toString());
-        setMaxParallelInstances(process.getParallel());
-        setOrder(process.getOrder());
-        setRetry(process.getRetry());
-    }
-
-    /*-----Step3 Clusters-------*/
-
-    public WebElement getStartDate() {
-        List<WebElement> inputs = driver.findElements(
-            By.xpath("//input[contains(@ng-model, 'cluster.validity.start.date')]"));
-        return inputs.get(inputs.size() - 1);
-    }
-
-    public WebElement getEndDate() {
-        List<WebElement> inputs = formBox.findElements(
-            By.xpath("//input[contains(@ng-model, 'cluster.validity.end.date')]"));
-        return inputs.get(inputs.size() - 1);
-    }
-
-    public String getValidityEnd() {
-        return String.format("%s %s:%s", getEndDate().getAttribute("value"), getEndHours().getAttribute("value"),
-            getEndMinutes().getAttribute("value"));
-    }
-
-    public WebElement getStartHours() {
-        List<WebElement> inputs = formBox.findElements(By.xpath("//input[contains(@ng-model, 'hours')]"));
-        return inputs.get(inputs.size() - 2);
-    }
-
-    public WebElement getEndHours() {
-        List<WebElement> inputs = formBox.findElements(By.xpath("//input[contains(@ng-model, 'hours')]"));
-        return inputs.get(inputs.size() - 1);
-    }
-
-    public WebElement getStartMinutes() {
-        List<WebElement> inputs = formBox.findElements(By.xpath("//input[contains(@ng-model, 'minutes')]"));
-        return inputs.get(inputs.size() - 2);
-    }
-
-    public WebElement getEndMinutes() {
-        List<WebElement> inputs = formBox.findElements(By.xpath("//input[contains(@ng-model, 'minutes')]"));
-        return inputs.get(inputs.size() - 1);
-    }
-
-    public WebElement getStartMeredian() {
-        List<WebElement> buttons = formBox.findElements(By.xpath("//td[@ng-show='showMeridian']/button"));
-        return buttons.get(buttons.size() - 2);
-    }
-
-    public WebElement getEndMeredian() {
-        List<WebElement> buttons = formBox.findElements(By.xpath("//td[@ng-show='showMeridian']/button"));
-        return buttons.get(buttons.size() - 1);
-    }
-
-    /**
-     * Retrieves the last cluster select.
-     */
-    public Select getClusterSelect() {
-        List<WebElement> selects = formBox.findElements(By.xpath("//select[contains(@ng-model, 'cluster.name')]"));
-        return new Select(selects.get(selects.size() - 1));
-    }
-
-    public void clickAddClusterButton() {
-        int initialSize = getWizardClusterCount();
-        formBox.findElement(By.xpath("//button[contains(., 'add cluster')]")).click();
-        int finalSize = getWizardClusterCount();
-        Assert.assertEquals(finalSize - initialSize, 1, "New cluster block should have been added.");
-    }
-
-    /**
-     * Removes last cluster on the form.
-     */
-    public void deleteLastCluster() {
-        int initialSize = getWizardClusterCount();
-        List<WebElement> buttons = formBox.findElements(By.xpath("//button[contains(., 'delete')]"));
-        Assert.assertTrue(buttons.size() > 0,
-            "Delete button should be present. There should be at least 2 cluster blocks");
-        buttons.get(buttons.size() - 1).click();
-        int finalSize = getWizardClusterCount();
-        Assert.assertEquals(initialSize - finalSize, 1, "One cluster block should have been removed.");
-    }
-
-    /**
-     * Sets multiple clusters in process.
-     */
-    public void setClusters(Clusters clusters) {
-        for (int i = 0; i < clusters.getClusters().size(); i++) {
-            if (i > 0) {
-                clickAddClusterButton();
-            }
-            setCluster(clusters.getClusters().get(i));
-        }
-    }
-
-    /**
-     * Fills the last cluster on the form.
-     */
-    public void setCluster(Cluster cluster) {
-        selectCluster(cluster.getName());
-        setClusterValidity(cluster);
-    }
-
-    /**
-     * Populates cluster form with values from process.Cluster object.
-     * @param cluster the process.Cluster object supplying the validity values
-     */
-    public void setClusterValidity(Cluster cluster) {
-        SimpleDateFormat format = new SimpleDateFormat("MM/dd/yyyy-hh-mm-a");
-        String start = format.format(cluster.getValidity().getStart());
-        String [] parts = start.split("-");
-        getStartDate().clear();
-        sendKeysSlowly(getStartDate(), parts[0]);
-        getStartHours().clear();
-        sendKeysSlowly(getStartHours(), parts[1]);
-        getStartMinutes().clear();
-        sendKeysSlowly(getStartMinutes(), parts[2]);
-        String meredian = getStartMeredian().getText();
-        if (!meredian.equals(parts[3])) {
-            getStartMeredian().click();
-        }
-        String end = format.format(cluster.getValidity().getEnd());
-        parts = end.split("-");
-        getEndDate().clear();
-        sendKeysSlowly(getEndDate(), parts[0]);
-        getEndHours().clear();
-        sendKeysSlowly(getEndHours(), parts[1]);
-        getEndMinutes().clear();
-        sendKeysSlowly(getEndMinutes(), parts[2]);
-        meredian = getEndMeredian().getText();
-        if (!meredian.equals(parts[3])) {
-            getEndMeredian().click();
-        }
-    }
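-
-    // Illustrative only: the "MM/dd/yyyy-hh-mm-a" pattern above renders a validity date such as
-    // "06/01/2015-09-30-AM", which is then split on '-' into the date, hours, minutes and AM/PM
-    // parts that are typed into the corresponding form fields.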
-
-    public void selectCluster(String clusterName) {
-        getClusterSelect().selectByValue(clusterName);
-    }
-
-    public String getClusterName(int indx) {
-        List<WebElement> blocks = formBox.findElements(By.xpath("//div[contains(@class, 'processCluster')]"));
-        return new Select(blocks.get(indx).findElement(By.tagName("select")))
-            .getFirstSelectedOption().getText();
-    }
-
-    public int getWizardClusterCount() {
-        return formBox.findElements(By.xpath("//div[contains(@class, 'processCluster')]")).size();
-    }
-
-    public void setProcessClustersInfo(ProcessMerlin process) {
-        for (int i = 0; i < process.getClusters().getClusters().size(); i++) {
-            if (i >= 1) {
-                clickAddClusterButton();
-            }
-            setCluster(process.getClusters().getClusters().get(i));
-        }
-    }
-
-    public List<String> getClustersFromDropDown() {
-        return getDropdownValues(getClusterSelect());
-    }
-
-    public void clickOnValidityStart() {
-        getStartDate().click();
-        List<WebElement> calendars = formBox.findElements(By.xpath("//ul[@ng-model='date']"));
-        waitForAngularToFinish();
-        Assert.assertTrue(calendars.get(calendars.size() - 2).isDisplayed(), "Calendar should pop up.");
-    }
-
-    public void clickOnValidityEnd() {
-        getEndDate().click();
-        List<WebElement> calendars = formBox.findElements(By.xpath("//ul[@ng-model='date']"));
-        waitForAngularToFinish();
-        Assert.assertTrue(calendars.get(calendars.size() - 1).isDisplayed(), "Calendar should pop up.");
-    }
-
-    /* Step 4 - Inputs & Outputs*/
-
-    private WebElement getAddInputButton() {
-        return formBox.findElement(By.xpath("//button[contains(., 'add input')]"));
-    }
-
-    private WebElement getAddOutputButton() {
-        return formBox.findElement(By.xpath("//button[contains(., 'add output')]"));
-    }
-
-    private WebElement getDeleteInputButton() {
-        return formBox.findElement(By.xpath("//button[contains(., 'delete')]"));
-    }
-
-    private WebElement getInputName(int index) {
-        return formBox.findElements(By.xpath("//input[@ng-model='input.name']")).get(index);
-    }
-
-    private Select getInputFeed(int index) {
-        return new Select(formBox.findElements(By.xpath("//select[@ng-model='input.feed']")).get(index));
-    }
-
-    private WebElement getInputStart(int index) {
-        return formBox.findElements(By.xpath("//input[@ng-model='input.start']")).get(index);
-    }
-
-    private WebElement getInputEnd(int index) {
-        return formBox.findElements(By.xpath("//input[@ng-model='input.end']")).get(index);
-    }
-
-    public void setInputInfo(Inputs inputs){
-        for (int i = 0; i < inputs.getInputs().size(); i++) {
-            clickAddInput();
-            sendKeysSlowly(getInputName(i), inputs.getInputs().get(i).getName());
-            getInputFeed(i).selectByVisibleText(inputs.getInputs().get(i).getFeed());
-            sendKeysSlowly(getInputStart(i), inputs.getInputs().get(i).getStart());
-            sendKeysSlowly(getInputEnd(i), inputs.getInputs().get(i).getEnd());
-            clickCheckBoxSecurely(getOptionalCheckbox(), inputs.getInputs().get(i).isOptional());
-        }
-    }
-
-    private WebElement getOptionalCheckbox() {
-        return formBox.findElement(By.xpath("//input[@ng-model='input.optional']"));
-    }
-
-    public boolean isOptionalSelected() {
-        return getOptionalCheckbox().isSelected();
-    }
-
-    public void clickAddInput(){
-        waitForAngularToFinish();
-        getAddInputButton().click();
-    }
-
-    public void clickAddOutput(){
-        waitForAngularToFinish();
-        getAddOutputButton().click();
-    }
-
-    public void clickDeleteInput(){
-        getDeleteInputButton().click();
-    }
-
-    private WebElement getDeleteOutputButton() {
-        return formBox.findElement(By.xpath("//button[contains(., 'delete')]"));
-    }
-
-    private WebElement getOutputName(int index) {
-        return formBox.findElements(By.xpath("//input[@ng-model='output.name']")).get(index);
-    }
-
-    private Select getOutputFeed(int index) {
-        return new Select(formBox.findElements(By.xpath("//select[@ng-model='output.feed']")).get(index));
-    }
-
-    private WebElement getOutputInstance(int index) {
-        return formBox.findElements(By.xpath("//input[@ng-model='output.outputInstance']")).get(index);
-    }
-
-    public void clickDeleteOutput(){
-        getDeleteOutputButton().click();
-    }
-
-    public void setOutputInfo(Outputs outputs){
-        for (int i = 0; i < outputs.getOutputs().size(); i++) {
-            clickAddOutput();
-            sendKeysSlowly(getOutputName(i), outputs.getOutputs().get(i).getName());
-            getOutputFeed(i).selectByVisibleText(outputs.getOutputs().get(i).getFeed());
-            sendKeysSlowly(getOutputInstance(i), outputs.getOutputs().get(i).getInstance());
-        }
-    }
-
-    public void setInputOutputInfo(ProcessMerlin process){
-        setInputInfo(process.getInputs());
-        setOutputInfo(process.getOutputs());
-    }
-
-    public List<String> getInputValues(int index){
-        return getDropdownValues(getInputFeed(index));
-    }
-
-    public List<String> getOutputValues(int index){
-        return getDropdownValues(getOutputFeed(index));
-    }
-
-    public String getInputNameText(int index){
-        return getInputName(index).getAttribute("value");
-    }
-
-    public String getInputFeedText(int index){
-        return getInputFeed(index).getFirstSelectedOption().getAttribute("value");
-    }
-
-    public String getInputStartText(int index){
-        return getInputStart(index).getAttribute("value");
-    }
-
-    public String getInputEndText(int index){
-        return getInputEnd(index).getAttribute("value");
-    }
-
-    public String getOutputNameText(int index){
-        return getOutputName(index).getAttribute("value");
-    }
-
-    public String getOutputFeedText(int index){
-        return getOutputFeed(index).getFirstSelectedOption().getAttribute("value");
-    }
-
-    public String getOutputInstanceText(int index){
-        return getOutputInstance(index).getAttribute("value");
-    }
-
-    public void isInputNameDisplayed(int index, boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getInputName(index), "Input Name " + index);
-        }else {
-            try{
-                getInputName(index);
-                Assert.fail("Input Name " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Input Name " + index + " not found");
-            }
-        }
-    }
-
-    public void isOutputNameDisplayed(int index, boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getOutputName(index), "Output Name " + index);
-        }else {
-            try{
-                getOutputName(index);
-                Assert.fail("Output Name " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Output Name " + index + " not found");
-            }
-        }
-    }
-
-
-    /* Step 5 - Summary */
-
-    public void clickSave(){
-        getSaveProcessButton().click();
-        waitForAlert();
-    }
-
-    @Override
-    public ProcessMerlin getEntityFromXMLPreview() {
-        return new ProcessMerlin(getXMLPreview());
-    }
-
-    @Override
-    public WebElement getEditXMLButton() {
-        return driver.findElement(By.id("editXmlButton"));
-    }
-
-    /**
-     * Gets the text from the summary box and parses it into the given ProcessMerlin object.
-     * @param draft empty ProcessMerlin object to be filled with the parsed values
-     */
-    public ProcessMerlin getProcessFromSummaryBox(ProcessMerlin draft) {
-        String text = summaryBox.getText().trim();
-        draft.setName(getProperty(text, null, "Tags", 2));
-        String currentBlock = text.substring(text.indexOf("Tags"), text.indexOf("Access Control List"));
-        String [] parts;
-        parts = currentBlock.trim().split("\\n");
-        String tags = "";
-        for (int i = 1; i < parts.length; i++) {
-            String tag = parts[i];
-            if (!tag.contains("No tags")) {
-                tag = tag.replace(" ", "");
-                tags = tags + (tags.isEmpty() ? tag : "," + tag);
-            }
-        }
-        if (!tags.isEmpty()) {
-            draft.setTags(tags);
-        }
-
-        Workflow workflow = new Workflow();
-        workflow.setName(getProperty(text, "Workflow", "Engine", 2));
-        workflow.setEngine(EngineType.fromValue(getProperty(text, "Engine", "Version", 1)));
-        workflow.setVersion(getProperty(text, "Version", "Path", 1));
-        workflow.setPath(getProperty(text, "Path", "Timing", 1));
-        draft.setWorkflow(workflow);
-
-        draft.setTimezone(TimeZone.getTimeZone(getProperty(text, "Timing", "Frequency", 2)));
-        parts = getProperty(text, "Frequency", "Max. parallel instances", 1).split(" ");
-        draft.setFrequency(new Frequency(parts[1], Frequency.TimeUnit.valueOf(parts[2])));
-        draft.setParallel(Integer.parseInt(getProperty(text, "Max. parallel instances", "Order", 1)));
-        draft.setOrder(ExecutionType.fromValue(getProperty(text, "Order", "Retry", 1)));
-
-        String aclOwner = getProperty(text, "Owner", "Group", 1);
-        String aclGroup = getProperty(text, "Group", "Permissions", 1);
-        String aclPermission = getProperty(text, "Permissions", "Workflow", 1);
-        draft.setACL(aclOwner, aclGroup, aclPermission);
-
-        Retry retry = new Retry();
-        retry.setPolicy(PolicyType.fromValue(getProperty(text, "Retry", "Attempts", 2)));
-        retry.setAttempts(Integer.parseInt(getProperty(text, "Attempts", "Delay", 1)));
-        parts = getProperty(text, "Delay", "Clusters", 1).split(" ");
-        retry.setDelay(new Frequency(parts[2], Frequency.TimeUnit.valueOf(parts[3])));
-        draft.setRetry(retry);
-
-        //get clusters
-        currentBlock = text.substring(text.indexOf("Clusters"), text.indexOf("Inputs"));
-        int last = 0;
-        while (last != -1) {
-            Cluster cluster = new Cluster();
-            cluster.setName(getProperty(currentBlock, "Name", "Validity", 1));
-            //remove the part which was used
-            currentBlock = currentBlock.substring(currentBlock.indexOf("Validity"));
-            //get validity
-            String start = getProperty(currentBlock, "Validity", "End", 2);
-            //check if there are other clusters
-            last = currentBlock.indexOf("Name");
-            String innerBlock = currentBlock.substring(currentBlock.indexOf("End"),
-                last != -1 ? last : currentBlock.length() - 1).trim();
-            String end = innerBlock.trim().split("\\n")[1];
-            Validity validity = new Validity();
-            DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy'-'MM'-'dd' 'HH':'mm");
-            validity.setStart(formatter.parseDateTime(start.replaceAll("\"", "")).toDate());
-            validity.setEnd(formatter.parseDateTime(end.replaceAll("\"", "")).toDate());
-            cluster.setValidity(validity);
-            draft.getClusters().getClusters().add(cluster);
-        }
-        //get inputs
-        currentBlock = text.substring(text.indexOf("Inputs"), text.indexOf("Outputs"));
-        last = 0;
-        while (last != -1) {
-            Input input = new Input();
-            //get input name
-            input.setName(getProperty(currentBlock, "Name", "Feed", 1));
-            //remove the part which was used
-            currentBlock = currentBlock.substring(currentBlock.indexOf("Name") + 4);
-            //get input feed
-            input.setFeed(getProperty(currentBlock, "Feed", "Instance", 1));
-            //get input start
-            input.setStart(getProperty(currentBlock, "Instance", "End", 2));
-            //get input end
-            last = currentBlock.indexOf("Name");
-            String innerBlock = currentBlock.substring(currentBlock.indexOf("End"),
-                last != -1 ? last : currentBlock.length() - 1).trim();
-            parts = innerBlock.trim().split("\\n");
-            input.setEnd(parts[1]);
-            draft.getInputs().getInputs().add(input);
-            //remove part which was parsed
-            currentBlock = currentBlock.substring(currentBlock.indexOf("End") + 4);
-        }
-        //get outputs
-        currentBlock = text.substring(text.indexOf("Outputs"));
-        last = 0;
-        while (last != -1) {
-            Output output = new Output();
-            output.setName(getProperty(currentBlock, "Name", "Feed", 1));
-            //remove the part which was used
-            currentBlock = currentBlock.substring(currentBlock.indexOf("Feed"));
-            //get feed
-            output.setFeed(getProperty(currentBlock, "Feed", "Instance", 1));
-            last = currentBlock.indexOf("Name");
-            output.setInstance(getProperty(currentBlock, "Instance", "Name", 2));
-            draft.getOutputs().getOutputs().add(output);
-        }
-        //check compulsory process properties
-        Assert.assertNotNull(draft.getACL(), "ACL is empty (null).");
-        return draft;
-    }
-
-    /**
-     * Retrieves property from source text.
-     */
-    private String getProperty(String block, String start, String end, int propertyIndex) {
-        int s = start != null ? block.indexOf(start) : 0;
-        s = s == -1 ? 0 : s;
-        int e = end != null ? block.indexOf(end) : block.length() - 1;
-        e = e == -1 ? block.length() : e;
-        String subBlock = block.substring(s, e).trim();
-        String [] parts = subBlock.trim().split("\\n");
-        return parts.length - 1 >= propertyIndex ? parts[propertyIndex].trim() : null;
-    }
-    }
-}


[25/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-schedulerinfo-1.json
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-schedulerinfo-1.json b/common/src/test/resources/config/feed/feed-schedulerinfo-1.json
deleted file mode 100644
index 6525c7d..0000000
--- a/common/src/test/resources/config/feed/feed-schedulerinfo-1.json
+++ /dev/null
@@ -1,276 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-{
-    "scheduler": {
-        "schedulerInfo": {
-            "capacity": 100.0,
-            "maxCapacity": 100.0,
-            "queueName": "root",
-            "queues": {
-                "queue": [
-                    {
-                        "absoluteCapacity": 10.5,
-                        "absoluteMaxCapacity": 50.0,
-                        "absoluteUsedCapacity": 0.0,
-                        "capacity": 10.5,
-                        "maxCapacity": 50.0,
-                        "numApplications": 0,
-                        "queueName": "a",
-                        "queues": {
-                            "queue": [
-                                {
-                                    "absoluteCapacity": 3.15,
-                                    "absoluteMaxCapacity": 25.0,
-                                    "absoluteUsedCapacity": 0.0,
-                                    "capacity": 30.000002,
-                                    "maxCapacity": 50.0,
-                                    "numApplications": 0,
-                                    "queueName": "a1",
-                                    "queues": {
-                                        "queue": [
-                                            {
-                                                "absoluteCapacity": 2.6775,
-                                                "absoluteMaxCapacity": 25.0,
-                                                "absoluteUsedCapacity": 0.0,
-                                                "capacity": 85.0,
-                                                "maxActiveApplications": 1,
-                                                "maxActiveApplicationsPerUser": 1,
-                                                "maxApplications": 267,
-                                                "maxApplicationsPerUser": 267,
-                                                "maxCapacity": 100.0,
-                                                "numActiveApplications": 0,
-                                                "numApplications": 0,
-                                                "numContainers": 0,
-                                                "numPendingApplications": 0,
-                                                "queueName": "a1a",
-                                                "resourcesUsed": {
-                                                    "memory": 0,
-                                                    "vCores": 0
-                                                },
-                                                "state": "RUNNING",
-                                                "type": "capacitySchedulerLeafQueueInfo",
-                                                "usedCapacity": 0.0,
-                                                "usedResources": "<memory:0, vCores:0>",
-                                                "userLimit": 100,
-                                                "userLimitFactor": 1.0,
-                                                "users": null
-                                            },
-                                            {
-                                                "absoluteCapacity": 0.47250003,
-                                                "absoluteMaxCapacity": 25.0,
-                                                "absoluteUsedCapacity": 0.0,
-                                                "capacity": 15.000001,
-                                                "maxActiveApplications": 1,
-                                                "maxActiveApplicationsPerUser": 1,
-                                                "maxApplications": 47,
-                                                "maxApplicationsPerUser": 47,
-                                                "maxCapacity": 100.0,
-                                                "numActiveApplications": 0,
-                                                "numApplications": 0,
-                                                "numContainers": 0,
-                                                "numPendingApplications": 0,
-                                                "queueName": "a1b",
-                                                "resourcesUsed": {
-                                                    "memory": 0,
-                                                    "vCores": 0
-                                                },
-                                                "state": "RUNNING",
-                                                "type": "capacitySchedulerLeafQueueInfo",
-                                                "usedCapacity": 0.0,
-                                                "usedResources": "<memory:0, vCores:0>",
-                                                "userLimit": 100,
-                                                "userLimitFactor": 1.0,
-                                                "users": null
-                                            }
-                                        ]
-                                    },
-                                    "resourcesUsed": {
-                                        "memory": 0,
-                                        "vCores": 0
-                                    },
-                                    "state": "RUNNING",
-                                    "usedCapacity": 0.0,
-                                    "usedResources": "<memory:0, vCores:0>"
-                                },
-                                {
-                                    "absoluteCapacity": 7.35,
-                                    "absoluteMaxCapacity": 50.0,
-                                    "absoluteUsedCapacity": 0.0,
-                                    "capacity": 70.0,
-                                    "maxActiveApplications": 1,
-                                    "maxActiveApplicationsPerUser": 100,
-                                    "maxApplications": 735,
-                                    "maxApplicationsPerUser": 73500,
-                                    "maxCapacity": 100.0,
-                                    "numActiveApplications": 0,
-                                    "numApplications": 0,
-                                    "numContainers": 0,
-                                    "numPendingApplications": 0,
-                                    "queueName": "a2",
-                                    "resourcesUsed": {
-                                        "memory": 0,
-                                        "vCores": 0
-                                    },
-                                    "state": "RUNNING",
-                                    "type": "capacitySchedulerLeafQueueInfo",
-                                    "usedCapacity": 0.0,
-                                    "usedResources": "<memory:0, vCores:0>",
-                                    "userLimit": 100,
-                                    "userLimitFactor": 100.0,
-                                    "users": null
-                                }
-                            ]
-                        },
-                        "resourcesUsed": {
-                            "memory": 0,
-                            "vCores": 0
-                        },
-                        "state": "RUNNING",
-                        "usedCapacity": 0.0,
-                        "usedResources": "<memory:0, vCores:0>"
-                    },
-                    {
-                        "absoluteCapacity": 89.5,
-                        "absoluteMaxCapacity": 100.0,
-                        "absoluteUsedCapacity": 0.0,
-                        "capacity": 89.5,
-                        "maxCapacity": 100.0,
-                        "numApplications": 2,
-                        "queueName": "b",
-                        "queues": {
-                            "queue": [
-                                {
-                                    "absoluteCapacity": 53.7,
-                                    "absoluteMaxCapacity": 100.0,
-                                    "absoluteUsedCapacity": 0.0,
-                                    "capacity": 60.000004,
-                                    "maxActiveApplications": 1,
-                                    "maxActiveApplicationsPerUser": 100,
-                                    "maxApplications": 5370,
-                                    "maxApplicationsPerUser": 537000,
-                                    "maxCapacity": 100.0,
-                                    "numActiveApplications": 1,
-                                    "numApplications": 2,
-                                    "numContainers": 0,
-                                    "numPendingApplications": 1,
-                                    "queueName": "b1",
-                                    "resourcesUsed": {
-                                        "memory": 0,
-                                        "vCores": 0
-                                    },
-                                    "state": "RUNNING",
-                                    "type": "capacitySchedulerLeafQueueInfo",
-                                    "usedCapacity": 0.0,
-                                    "usedResources": "<memory:0, vCores:0>",
-                                    "userLimit": 100,
-                                    "userLimitFactor": 100.0,
-                                    "users": {
-                                        "user": [
-                                            {
-                                                "numActiveApplications": 0,
-                                                "numPendingApplications": 1,
-                                                "resourcesUsed": {
-                                                    "memory": 0,
-                                                    "vCores": 0
-                                                },
-                                                "username": "user2"
-                                            },
-                                            {
-                                                "numActiveApplications": 1,
-                                                "numPendingApplications": 0,
-                                                "resourcesUsed": {
-                                                    "memory": 0,
-                                                    "vCores": 0
-                                                },
-                                                "username": "user1"
-                                            }
-                                        ]
-                                    }
-                                },
-                                {
-                                    "absoluteCapacity": 35.3525,
-                                    "absoluteMaxCapacity": 100.0,
-                                    "absoluteUsedCapacity": 0.0,
-                                    "capacity": 39.5,
-                                    "maxActiveApplications": 1,
-                                    "maxActiveApplicationsPerUser": 100,
-                                    "maxApplications": 3535,
-                                    "maxApplicationsPerUser": 353500,
-                                    "maxCapacity": 100.0,
-                                    "numActiveApplications": 0,
-                                    "numApplications": 0,
-                                    "numContainers": 0,
-                                    "numPendingApplications": 0,
-                                    "queueName": "b2",
-                                    "resourcesUsed": {
-                                        "memory": 0,
-                                        "vCores": 0
-                                    },
-                                    "state": "RUNNING",
-                                    "type": "capacitySchedulerLeafQueueInfo",
-                                    "usedCapacity": 0.0,
-                                    "usedResources": "<memory:0, vCores:0>",
-                                    "userLimit": 100,
-                                    "userLimitFactor": 100.0,
-                                    "users": null
-                                },
-                                {
-                                    "absoluteCapacity": 0.4475,
-                                    "absoluteMaxCapacity": 100.0,
-                                    "absoluteUsedCapacity": 0.0,
-                                    "capacity": 0.5,
-                                    "maxActiveApplications": 1,
-                                    "maxActiveApplicationsPerUser": 100,
-                                    "maxApplications": 44,
-                                    "maxApplicationsPerUser": 4400,
-                                    "maxCapacity": 100.0,
-                                    "numActiveApplications": 0,
-                                    "numApplications": 0,
-                                    "numContainers": 0,
-                                    "numPendingApplications": 0,
-                                    "queueName": "b3",
-                                    "resourcesUsed": {
-                                        "memory": 0,
-                                        "vCores": 0
-                                    },
-                                    "state": "RUNNING",
-                                    "type": "capacitySchedulerLeafQueueInfo",
-                                    "usedCapacity": 0.0,
-                                    "usedResources": "<memory:0, vCores:0>",
-                                    "userLimit": 100,
-                                    "userLimitFactor": 100.0,
-                                    "users": null
-                                }
-                            ]
-                        },
-                        "resourcesUsed": {
-                            "memory": 0,
-                            "vCores": 0
-                        },
-                        "state": "RUNNING",
-                        "usedCapacity": 0.0,
-                        "usedResources": "<memory:0, vCores:0>"
-                    }
-                ]
-            },
-            "type": "capacityScheduler",
-            "usedCapacity": 0.0
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-schedulerinfo-2.json
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-schedulerinfo-2.json b/common/src/test/resources/config/feed/feed-schedulerinfo-2.json
deleted file mode 100644
index d49bc81..0000000
--- a/common/src/test/resources/config/feed/feed-schedulerinfo-2.json
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-{"scheduler":{"schedulerInfo":{"type":"capacityScheduler","capacity":100.0,"usedCapacity":0.0,"maxCapacity":100.0,"queueName":"root","queues":{"queue":[{"type":"capacitySchedulerLeafQueueInfo","capacity":100.0,"usedCapacity":0.0,"maxCapacity":100.0,"absoluteCapacity":100.0,"absoluteMaxCapacity":100.0,"absoluteUsedCapacity":0.0,"numApplications":0,"queueName":"default","state":"RUNNING","resourcesUsed":{"memory":0,"vCores":0},"hideReservationQueues":false,"nodeLabels":["*"],"numActiveApplications":0,"numPendingApplications":0,"numContainers":0,"maxApplications":10000,"maxApplicationsPerUser":10000,"userLimit":100,"users":null,"userLimitFactor":1.0,"AMResourceLimit":{"memory":2046,"vCores":1},"usedAMResource":{"memory":0,"vCores":0},"userAMResourceLimit":{"memory":2046,"vCores":1},"preemptionDisabled":true}]},"health":{"lastrun":1451957838430,"operationsInfo":{"entry":{"key":"last-preemption","value":{"nodeId":"N/A","containerId":"N/A","queue":"N/A"}},"entry":{"key":"last-reservation"
 ,"value":{"nodeId":"N/A","containerId":"N/A","queue":"N/A"}},"entry":{"key":"last-allocation","value":{"nodeId":"c6401.ambari.apache.org:45454","containerId":"container_e11_1450120354929_5040_01_000002","queue":"root.default"}},"entry":{"key":"last-release","value":{"nodeId":"c6401.ambari.apache.org:45454","containerId":"container_e11_1450120354929_5040_01_000001","queue":"root.default"}}},"lastRunDetails":[{"operation":"releases","count":0,"resources":{"memory":0,"vCores":0}},{"operation":"allocations","count":0,"resources":{"memory":0,"vCores":0}},{"operation":"reservations","count":0,"resources":{"memory":0,"vCores":0}}]}}}}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-tags-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-tags-0.1.xml b/common/src/test/resources/config/feed/feed-tags-0.1.xml
deleted file mode 100644
index 644f0f3..0000000
--- a/common/src/test/resources/config/feed/feed-tags-0.1.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="tags" xmlns="uri:falcon:feed:0.1">
-    <tags>   consumer = consumer@xyz.com, owner = producer@xyz.com, =forecasting   </tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/hive-table-feed-out.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/hive-table-feed-out.xml b/common/src/test/resources/config/feed/hive-table-feed-out.xml
deleted file mode 100644
index d13e538..0000000
--- a/common/src/test/resources/config/feed/hive-table-feed-out.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks summary table " name="clicks-summary-table" xmlns="uri:falcon:feed:0.1">
-    <groups>online,bi</groups>
-
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source" partition="*/${cluster.colo}">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <table uri="catalog:testCluster:clicks-summary#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <table uri="catalog:backupCluster:clicks-summary#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-        </cluster>
-    </clusters>
-
-    <table uri="catalog:default:clicks-summary#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/hive-table-feed.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/hive-table-feed.xml b/common/src/test/resources/config/feed/hive-table-feed.xml
deleted file mode 100644
index 7be20ce..0000000
--- a/common/src/test/resources/config/feed/hive-table-feed.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log table" name="clicks-table" xmlns="uri:falcon:feed:0.1">
-    <groups>online,bi</groups>
-
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <clusters>
-        <cluster name="testCluster" type="source" partition="*/${cluster.colo}">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <table uri="catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/invalid-feed.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/invalid-feed.xml b/common/src/test/resources/config/feed/invalid-feed.xml
deleted file mode 100644
index 1dcc14a..0000000
--- a/common/src/test/resources/config/feed/invalid-feed.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1">
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source" partition="*/${cluster.colo}">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-    <table uri="catalog:default:clicks#ds=$YEAR-$MONTH-$DAY-$HOUR" />
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/table-with-multiple-sources-feed.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/table-with-multiple-sources-feed.xml b/common/src/test/resources/config/feed/table-with-multiple-sources-feed.xml
deleted file mode 100644
index 0c8eba9..0000000
--- a/common/src/test/resources/config/feed/table-with-multiple-sources-feed.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1">
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-        <cluster name="testCluster" type="source">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <table uri="catalog:default:clicks#ds=$YEAR-$MONTH-$DAY-$HOUR" />
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/process/process-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/process/process-0.1.xml b/common/src/test/resources/config/process/process-0.1.xml
deleted file mode 100644
index 039208c..0000000
--- a/common/src/test/resources/config/process/process-0.1.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<process name="sample" xmlns="uri:falcon:process:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <pipelines>testPipeline,dataReplication_Pipeline</pipelines>
-    <clusters>
-        <cluster name="testCluster">
-            <validity start="2011-11-02T00:00Z" end="2091-12-30T00:00Z"/>
-        </cluster>
-    </clusters>
-    <parallel>1</parallel>
-    <order>LIFO</order>
-    <frequency>hours(1)</frequency>
-    <sla shouldStartIn="hours(2)" shouldEndIn="hours(4)"/>
-
-    <!-- what -->
-    <inputs>
-        <input name="impression" feed="impressionFeed" start="today(0,0)" end="today(2,0)" partition="*/US"/>
-        <input name="clicks" feed="clicksFeed" start="yesterday(0,0)" end="yesterday(20,0)"/>
-    </inputs>
-
-    <outputs>
-        <output name="impOutput" feed="imp-click-join1" instance="today(0,0)"/>
-        <output name="clicksOutput" feed="imp-click-join2" instance="today(0,0)"/>
-    </outputs>
-
-    <!-- how -->
-    <properties>
-        <property name="name1" value="value1"/>
-        <property name="name2" value="value2"/>
-    </properties>
-
-    <workflow engine="oozie" path="/falcon/test/workflow"/>
-
-    <retry policy="periodic" delay="minutes(10)" attempts="3"/>
-
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="impression" workflow-path="himpression/late/workflow"/>
-        <late-input input="clicks" workflow-path="hdfs://clicks/late/workflow"/>
-    </late-process>
-
-    <notification type="email" to="falcon@localhost"/>
-</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/process/process-0.2.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/process/process-0.2.xml b/common/src/test/resources/config/process/process-0.2.xml
deleted file mode 100644
index e1d5113..0000000
--- a/common/src/test/resources/config/process/process-0.2.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<process name="sample" xmlns="uri:falcon:process:0.1">
-    <clusters>
-        <cluster name="testCluster">
-            <validity start="2011-11-02T00:00Z" end="2011-12-30T00:00Z"/>
-        </cluster>
-    </clusters>
-    <!-- when -->
-    <parallel>1</parallel>
-    <order>LIFO</order>
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <!-- what -->
-    <inputs>
-        <input name="impression" feed="impressionFeed" start="today(0,0)"
-               end="today(2,0)" partition="*/US"/>
-        <input name="clicks" feed="clicks" start="yesterday(0,0)"
-               end="yesterday(20,0)"/>
-    </inputs>
-
-    <outputs>
-        <output name="impOutput" feed="imp-click-join1" instance="today(0,0)"/>
-        <output name="clicksOutput" feed="imp-click-join2" instance="today(0,0)"/>
-    </outputs>
-
-    <!-- how -->
-    <properties>
-        <property name="name1" value="value1"/>
-        <property name="name2" value="value2"/>
-    </properties>
-
-    <workflow engine="oozie" path="/falcon/test/workflow"/>
-
-    <retry policy="periodic" delay="minutes(10)" attempts="3"/>
-
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="impression" workflow-path="himpression/late/workflow"/>
-        <late-input input="clicks" workflow-path="hdfs://clicks/late/workflow"/>
-    </late-process>
-</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/process/process-bad-pipeline.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/process/process-bad-pipeline.xml b/common/src/test/resources/config/process/process-bad-pipeline.xml
deleted file mode 100644
index e506bd9..0000000
--- a/common/src/test/resources/config/process/process-bad-pipeline.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<process name="sample" xmlns="uri:falcon:process:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting</tags>
-    <pipelines>etl-pipeline fetl-pipeline, ,</pipelines>
-    <clusters>
-        <cluster name="testCluster">
-            <validity start="2011-11-02T00:00Z" end="2091-12-30T00:00Z"/>
-        </cluster>
-    </clusters>
-    <parallel>1</parallel>
-    <order>LIFO</order>
-    <frequency>hours(1)</frequency>
-
-    <!-- what -->
-    <inputs>
-        <input name="impression" feed="impressionFeed" start="today(0,0)" end="today(2,0)" partition="*/US"/>
-        <input name="clicks" feed="clicksFeed" start="yesterday(0,0)" end="yesterday(20,0)"/>
-    </inputs>
-
-    <outputs>
-        <output name="impOutput" feed="imp-click-join1" instance="today(0,0)"/>
-        <output name="clicksOutput" feed="imp-click-join2" instance="today(0,0)"/>
-    </outputs>
-
-    <!-- how -->
-    <properties>
-        <property name="name1" value="value1"/>
-        <property name="name2" value="value2"/>
-    </properties>
-
-    <workflow engine="oozie" path="/falcon/test/workflow"/>
-
-    <retry policy="periodic" delay="minutes(10)" attempts="3"/>
-
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="impression" workflow-path="himpression/late/workflow"/>
-        <late-input input="clicks" workflow-path="hdfs://clicks/late/workflow"/>
-    </late-process>
-</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/process/process-invalid.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/process/process-invalid.xml b/common/src/test/resources/config/process/process-invalid.xml
deleted file mode 100644
index d98a6e9..0000000
--- a/common/src/test/resources/config/process/process-invalid.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<Process name="">
-    <clusters>
-        <cluster name="">cluster</cluster>
-    </clusters>
-    <frequency>frequency</frequency>
-    <periodicity>periodicity</periodicity>
-    <validity end="" start="">validity</validity>
-    <inputs>
-        <somenode>somenode</somenode>
-        <input end="" feed="" start="">input</input>
-    </inputs>
-    <outputs>
-        <output feed="" instance="">output</output>
-    </outputs>
-    <workflow engine="" path="">workflow</workflow>
-    <retry attempts="" delay="" delayUnit="" policy="">retry</retry>
-    <late-process delay="" delayUnit="" policy="">
-        <late-input input="" workflow-path="">late-input</late-input>
-    </late-process>
-</Process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/process/process-table.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/process/process-table.xml b/common/src/test/resources/config/process/process-table.xml
deleted file mode 100644
index f9d8aa5..0000000
--- a/common/src/test/resources/config/process/process-table.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<process name="table-process" xmlns="uri:falcon:process:0.1">
-    <!-- where -->
-    <clusters>
-        <cluster name="testCluster">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-        </cluster>
-    </clusters>
-
-    <!-- when -->
-    <parallel>1</parallel>
-    <order>LIFO</order>
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <!-- what -->
-    <inputs>
-        <input name="input" feed="clicks-table" start="today(0,0)" end="today(20,0)"/>
-    </inputs>
-
-    <outputs>
-        <output name="output" feed="clicks-summary-table" instance="today(0,0)"/>
-    </outputs>
-
-    <!-- how -->
-    <workflow engine="oozie" path="/falcon/test/workflow"/>
-
-    <retry policy="periodic" delay="minutes(10)" attempts="3"/>
-
-    <!-- ACL -->
-    <ACL owner="falcon-ut-user" group="falcon" permission="*"/>
-</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/deploy.properties
----------------------------------------------------------------------
diff --git a/common/src/test/resources/deploy.properties b/common/src/test/resources/deploy.properties
deleted file mode 100644
index 74e1ba3..0000000
--- a/common/src/test/resources/deploy.properties
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-*.domain=unittest

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/runtime.properties
----------------------------------------------------------------------
diff --git a/common/src/test/resources/runtime.properties b/common/src/test/resources/runtime.properties
deleted file mode 100644
index f76ff51..0000000
--- a/common/src/test/resources/runtime.properties
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-*.domain=unittest
-
-unittest.log.cleanup.frequency.minutes.retention=500
-unittest.log.cleanup.frequency.hours.retention=500
-unittest.log.cleanup.frequency.days.retention=5000
-unittest.log.cleanup.frequency.months.retention=500
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/distro/pom.xml
----------------------------------------------------------------------
diff --git a/distro/pom.xml b/distro/pom.xml
deleted file mode 100644
index e3e3a5f..0000000
--- a/distro/pom.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-distro</artifactId>
-    <description>Apache Falcon Distro</description>
-    <name>Apache Falcon Distro</name>
-    <packaging>pom</packaging>
-      <profiles>
-         <profile>
-            <id>distributed</id>
-             <build>
-                <plugins>
-                    <plugin>
-                        <artifactId>maven-assembly-plugin</artifactId>
-                         <configuration>
-                            <descriptors>
-                                <descriptor>../src/main/assemblies/distributed-package.xml</descriptor>
-                                <descriptor>../src/main/assemblies/src-package.xml</descriptor>
-                            </descriptors>
-                            <finalName>apache-falcon-distributed-${project.version}</finalName>
-                        </configuration>
-                         <executions>
-                           <execution>
-                             <id>dist-assembly</id>
-                             <phase>package</phase>
-                             <goals>
-                               <goal>single</goal>
-                             </goals>
-                          </execution>
-                        </executions>
-                    </plugin>
-                </plugins>
-            </build>
-        </profile>
-     </profiles>
-          
-           <build>
-             <plugins>
-               <plugin>
-                 <artifactId>maven-assembly-plugin</artifactId>
-                 <configuration>
-                    <descriptors>
-                        <descriptor>../src/main/assemblies/standalone-package.xml</descriptor>
-                        <descriptor>../src/main/assemblies/src-package.xml</descriptor>
-                    </descriptors>
-                    <finalName>apache-falcon-${project.version}</finalName>
-                </configuration>
-                <executions>
-                   <execution>
-                     <id>dist-assembly</id>
-                     <phase>package</phase>
-                     <goals>
-                        <goal>single</goal>
-                     </goals>
-                  </execution>
-                </executions>
-             </plugin>
-           </plugins>
-         </build>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/angular-ui-bootstrap-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/angular-ui-bootstrap-LICENSE.txt b/docs/license/angular-ui-bootstrap-LICENSE.txt
deleted file mode 100644
index f61feea..0000000
--- a/docs/license/angular-ui-bootstrap-LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License
-
-Copyright (c) 2012-2015 the AngularUI Team, https://github.com/organizations/angular-ui/teams/291112
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/angular-ui-router-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/angular-ui-router-LICENSE.txt b/docs/license/angular-ui-router-LICENSE.txt
deleted file mode 100644
index 6413b09..0000000
--- a/docs/license/angular-ui-router-LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License
-
-Copyright (c) 2013-2015 The AngularUI Team, Karsten Sperling
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/angularJS-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/angularJS-LICENSE.txt b/docs/license/angularJS-LICENSE.txt
deleted file mode 100644
index fbd4c1d..0000000
--- a/docs/license/angularJS-LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License
-
-Copyright (c) 2010-2015 Google, Inc. http://angularjs.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/bootstrap-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/bootstrap-LICENSE.txt b/docs/license/bootstrap-LICENSE.txt
deleted file mode 100644
index 51fca54..0000000
--- a/docs/license/bootstrap-LICENSE.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/d3-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/d3-LICENSE.txt b/docs/license/d3-LICENSE.txt
deleted file mode 100644
index 0bc47f3..0000000
--- a/docs/license/d3-LICENSE.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2013, Michael Bostock
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-* The name Michael Bostock may not be used to endorse or promote products
-  derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/dagre-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/dagre-LICENSE.txt b/docs/license/dagre-LICENSE.txt
deleted file mode 100644
index d43325b..0000000
--- a/docs/license/dagre-LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2012 Chris Pettitt
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/dust-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/dust-LICENSE.txt b/docs/license/dust-LICENSE.txt
deleted file mode 100644
index 40a256e..0000000
--- a/docs/license/dust-LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2010 Aleksander Williams
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/dust-helpers-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/dust-helpers-LICENSE.txt b/docs/license/dust-helpers-LICENSE.txt
deleted file mode 100644
index 40a256e..0000000
--- a/docs/license/dust-helpers-LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2010 Aleksander Williams
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/entypo-font-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/entypo-font-LICENSE.txt b/docs/license/entypo-font-LICENSE.txt
deleted file mode 100644
index ab214f4..0000000
--- a/docs/license/entypo-font-LICENSE.txt
+++ /dev/null
@@ -1,92 +0,0 @@
-
-This Font Software is licensed under the SIL Open Font License, Version 1.1.
-This license is copied below, and is also available with a FAQ at:
-http://scripts.sil.org/OFL
-
-
------------------------------------------------------------
-SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
------------------------------------------------------------
-
-PREAMBLE
-The goals of the Open Font License (OFL) are to stimulate worldwide
-development of collaborative font projects, to support the font creation
-efforts of academic and linguistic communities, and to provide a free and
-open framework in which fonts may be shared and improved in partnership
-with others.
-
-The OFL allows the licensed fonts to be used, studied, modified and
-redistributed freely as long as they are not sold by themselves. The
-fonts, including any derivative works, can be bundled, embedded, 
-redistributed and/or sold with any software provided that any reserved
-names are not used by derivative works. The fonts and derivatives,
-however, cannot be released under any other type of license. The
-requirement for fonts to remain under this license does not apply
-to any document created using the fonts or their derivatives.
-
-DEFINITIONS
-"Font Software" refers to the set of files released by the Copyright
-Holder(s) under this license and clearly marked as such. This may
-include source files, build scripts and documentation.
-
-"Reserved Font Name" refers to any names specified as such after the
-copyright statement(s).
-
-"Original Version" refers to the collection of Font Software components as
-distributed by the Copyright Holder(s).
-
-"Modified Version" refers to any derivative made by adding to, deleting,
-or substituting -- in part or in whole -- any of the components of the
-Original Version, by changing formats or by porting the Font Software to a
-new environment.
-
-"Author" refers to any designer, engineer, programmer, technical
-writer or other person who contributed to the Font Software.
-
-PERMISSION & CONDITIONS
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of the Font Software, to use, study, copy, merge, embed, modify,
-redistribute, and sell modified and unmodified copies of the Font
-Software, subject to the following conditions:
-
-1) Neither the Font Software nor any of its individual components,
-in Original or Modified Versions, may be sold by itself.
-
-2) Original or Modified Versions of the Font Software may be bundled,
-redistributed and/or sold with any software, provided that each copy
-contains the above copyright notice and this license. These can be
-included either as stand-alone text files, human-readable headers or
-in the appropriate machine-readable metadata fields within text or
-binary files as long as those fields can be easily viewed by the user.
-
-3) No Modified Version of the Font Software may use the Reserved Font
-Name(s) unless explicit written permission is granted by the corresponding
-Copyright Holder. This restriction only applies to the primary font name as
-presented to the users.
-
-4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
-Software shall not be used to promote, endorse or advertise any
-Modified Version, except to acknowledge the contribution(s) of the
-Copyright Holder(s) and the Author(s) or with their explicit written
-permission.
-
-5) The Font Software, modified or unmodified, in part or in whole,
-must be distributed entirely under this license, and must not be
-distributed under any other license. The requirement for fonts to
-remain under this license does not apply to any document created
-using the Font Software.
-
-TERMINATION
-This license becomes null and void if any of the above conditions are
-not met.
-
-DISCLAIMER
-THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
-OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
-DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
-OTHER DEALINGS IN THE FONT SOFTWARE.


[26/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/workflow/WorkflowExecutionContextTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/workflow/WorkflowExecutionContextTest.java b/common/src/test/java/org/apache/falcon/workflow/WorkflowExecutionContextTest.java
deleted file mode 100644
index c0bc252..0000000
--- a/common/src/test/java/org/apache/falcon/workflow/WorkflowExecutionContextTest.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.testng.Assert;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.Date;
-
-
-/**
- * A test for WorkflowExecutionContext.
- */
-public class WorkflowExecutionContextTest {
-
-    private static final String FALCON_USER = "falcon-user";
-    private static final String LOGS_DIR = "target/log";
-    private static final String NOMINAL_TIME = "2014-01-01-01-00";
-    private static final String OPERATION = "GENERATE";
-
-    private static final String CLUSTER_NAME = "primary-cluster";
-    private static final String ENTITY_NAME = "sample-process";
-    private static final String WORKFLOW_NAME = "imp-click-join-workflow";
-    private static final String WORKFLOW_VERSION = "1.0.9";
-
-    private static final String INPUT_FEED_NAMES = "impression-feed#clicks-feed";
-    private static final String INPUT_INSTANCE_PATHS =
-            "jail://global:00/falcon/impression-feed/2014/01/01,jail://global:00/falcon/impression-feed/2014/01/02"
-                    + "#jail://global:00/falcon/clicks-feed/2014-01-01";
-
-    private static final String OUTPUT_FEED_NAMES = "imp-click-join1,imp-click-join2";
-    private static final String OUTPUT_INSTANCE_PATHS =
-            "jail://global:00/falcon/imp-click-join1/20140101,jail://global:00/falcon/imp-click-join2/20140101";
-
-    private static final String BROKER = "org.apache.activemq.ActiveMQConnectionFactory";
-
-    private static final String ISO8601_TIME = SchemaHelper.formatDateUTCToISO8601(
-            NOMINAL_TIME, WorkflowExecutionContext.INSTANCE_FORMAT);
-
-    private WorkflowExecutionContext context;
-
-    @BeforeMethod
-    public void setUp() throws Exception {
-        context = WorkflowExecutionContext.create(getTestMessageArgs(),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-    }
-
-    @Test
-    public void testGetValue() throws Exception {
-        Assert.assertEquals(context.getValue(WorkflowExecutionArgs.ENTITY_NAME), ENTITY_NAME);
-    }
-
-    @Test
-    public void testGetValueWithDefault() throws Exception {
-        Assert.assertEquals(context.getValue(WorkflowExecutionArgs.TOPIC_NAME, "ABSENT"), "ABSENT");
-    }
-
-    @Test
-    public void testContainsKey() throws Exception {
-        Assert.assertTrue(context.containsKey(WorkflowExecutionArgs.ENTITY_NAME));
-        Assert.assertFalse(context.containsKey(WorkflowExecutionArgs.TOPIC_NAME));
-    }
-
-    @Test
-    public void testEntrySet() throws Exception {
-        Assert.assertNotNull(context.entrySet());
-    }
-
-    @Test
-    public void testHasWorkflowSucceeded() throws Exception {
-        Assert.assertTrue(context.hasWorkflowSucceeded());
-        Assert.assertEquals(context.getWorkflowStatus(), WorkflowExecutionContext.Status.SUCCEEDED);
-    }
-
-    @Test
-    public void testHasWorkflowFailed() throws Exception {
-        Assert.assertFalse(context.hasWorkflowFailed());
-    }
-
-    @Test
-    public void testGetContextFile() throws Exception {
-        Assert.assertEquals(context.getContextFile(),
-                WorkflowExecutionContext.getFilePath(context.getLogDir(), context.getEntityName(),
-                        context.getEntityType(), context.getOperation()));
-    }
-
-    @Test
-    public void testGetLogDir() throws Exception {
-        Assert.assertEquals(context.getLogDir(), LOGS_DIR);
-    }
-
-    @Test
-    public void testGetLogFile() throws Exception {
-        Assert.assertEquals(context.getLogFile(), LOGS_DIR + "/log.txt");
-    }
-
-    @Test
-    public void testGetNominalTime() throws Exception {
-        Assert.assertEquals(context.getNominalTime(), NOMINAL_TIME);
-    }
-
-    @Test
-    public void testGetNominalTimeAsISO8601() throws Exception {
-        Assert.assertEquals(context.getNominalTimeAsISO8601(), ISO8601_TIME);
-    }
-
-    @Test
-    public void testGetTimestamp() throws Exception {
-        Assert.assertEquals(context.getTimestamp(), NOMINAL_TIME);
-    }
-
-    @Test
-    public void testGetTimeStampAsISO8601() throws Exception {
-        Assert.assertEquals(context.getTimeStampAsISO8601(), ISO8601_TIME);
-    }
-
-    @Test
-    public void testGetClusterName() throws Exception {
-        Assert.assertEquals(context.getClusterName(), CLUSTER_NAME);
-    }
-
-    @Test
-    public void testGetEntityName() throws Exception {
-        Assert.assertEquals(context.getEntityName(), ENTITY_NAME);
-    }
-
-    @Test
-    public void testGetEntityType() throws Exception {
-        Assert.assertEquals(context.getEntityType(), "PROCESS");
-    }
-
-    @Test
-    public void testGetOperation() throws Exception {
-        Assert.assertEquals(context.getOperation().name(), OPERATION);
-    }
-
-    @Test
-    public void testGetOutputFeedNames() throws Exception {
-        Assert.assertEquals(context.getOutputFeedNames(), OUTPUT_FEED_NAMES);
-    }
-
-    @Test
-    public void testGetOutputFeedNamesList() throws Exception {
-        Assert.assertEquals(context.getOutputFeedNamesList(),
-                OUTPUT_FEED_NAMES.split(WorkflowExecutionContext.OUTPUT_FEED_SEPARATOR));
-    }
-
-    @Test
-    public void testGetOutputFeedInstancePaths() throws Exception {
-        Assert.assertEquals(context.getOutputFeedInstancePaths(), OUTPUT_INSTANCE_PATHS);
-    }
-
-    @Test
-    public void testGetOutputFeedInstancePathsList() throws Exception {
-        Assert.assertEquals(context.getOutputFeedInstancePathsList(),
-                OUTPUT_INSTANCE_PATHS.split(","));
-    }
-
-    @Test
-    public void testGetInputFeedNames() throws Exception {
-        Assert.assertEquals(context.getInputFeedNames(), INPUT_FEED_NAMES);
-    }
-
-    @Test
-    public void testGetInputFeedNamesList() throws Exception {
-        Assert.assertEquals(context.getInputFeedNamesList(),
-                INPUT_FEED_NAMES.split(WorkflowExecutionContext.INPUT_FEED_SEPARATOR));
-    }
-
-    @Test
-    public void testGetInputFeedInstancePaths() throws Exception {
-        Assert.assertEquals(context.getInputFeedInstancePaths(), INPUT_INSTANCE_PATHS);
-    }
-
-    @Test
-    public void testGetInputFeedInstancePathsList() throws Exception {
-        Assert.assertEquals(context.getInputFeedInstancePathsList(),
-                INPUT_INSTANCE_PATHS.split("#"));
-    }
-
-    @Test
-    public void testGetWorkflowEngineUrl() throws Exception {
-        Assert.assertEquals(context.getWorkflowEngineUrl(), "http://localhost:11000/oozie");
-    }
-
-    @Test
-    public void testGetUserWorkflowEngine() throws Exception {
-        Assert.assertEquals(context.getUserWorkflowEngine(), EngineType.PIG.name());
-    }
-
-    @Test
-    public void testGetUserWorkflowVersion() throws Exception {
-        Assert.assertEquals(context.getUserWorkflowVersion(), WORKFLOW_VERSION);
-    }
-
-    @Test
-    public void testGetWorkflowId() throws Exception {
-        Assert.assertEquals(context.getWorkflowId(), "workflow-01-00");
-    }
-
-    @Test
-    public void testGetUserSubflowId() throws Exception {
-        Assert.assertEquals(context.getUserSubflowId(), "userflow@wf-id");
-    }
-
-    @Test
-    public void testGetWorkflowRunId() throws Exception {
-        Assert.assertEquals(context.getWorkflowRunId(), 1);
-    }
-
-    @Test
-    public void testGetWorkflowRunIdString() throws Exception {
-        Assert.assertEquals(context.getWorkflowRunIdString(), "1");
-    }
-
-    @Test
-    public void testGetWorkflowUser() throws Exception {
-        Assert.assertEquals(context.getWorkflowUser(), FALCON_USER);
-    }
-
-    @Test
-    public void testGetExecutionCompletionTime() throws Exception {
-        Assert.assertNotNull(context.getExecutionCompletionTime());
-    }
-
-    @Test
-    public void testWorkflowStartEnd() throws Exception {
-        Assert.assertEquals(context.getWorkflowEndTime() - context.getWorkflowStartTime(), 1000000);
-    }
-
-    @Test
-    public void testSetAndGetValue() throws Exception {
-        context.setValue(WorkflowExecutionArgs.RUN_ID, "10");
-        Assert.assertEquals(context.getValue(WorkflowExecutionArgs.RUN_ID), "10");
-        context.setValue(WorkflowExecutionArgs.RUN_ID, "1");
-    }
-
-    @Test
-    public void testSerializeDeserialize() throws Exception {
-        String contextFile = context.getContextFile();
-        context.serialize();
-        WorkflowExecutionContext newContext = WorkflowExecutionContext.deSerialize(contextFile);
-        Assert.assertNotNull(newContext);
-        Assert.assertEquals(newContext.entrySet().size(), context.entrySet().size());
-    }
-
-    @Test
-    public void testSerializeDeserializeWithFile() throws Exception {
-        String contextFile = "/tmp/blah.json";
-        context.serialize(contextFile);
-        WorkflowExecutionContext newContext = WorkflowExecutionContext.deSerialize(contextFile);
-        Assert.assertNotNull(newContext);
-        Assert.assertEquals(newContext.entrySet().size(), context.entrySet().size());
-    }
-
-    @Test
-    public void testGetFilePathForProcess() throws Exception {
-        final String filePath = WorkflowExecutionContext.getFilePath(LOGS_DIR,
-                ENTITY_NAME, "PROCESS", WorkflowExecutionContext.EntityOperations.GENERATE);
-        Assert.assertEquals(filePath,
-                LOGS_DIR + "/" + ENTITY_NAME + "-wf-post-exec-context.json");
-        Assert.assertEquals(context.getContextFile(), filePath);
-    }
-
-
-    @Test
-    public void testGetFilePathForFeedRetention() throws Exception {
-        final String filePath = WorkflowExecutionContext.getFilePath(LOGS_DIR,
-                ENTITY_NAME, "FEED", WorkflowExecutionContext.EntityOperations.DELETE);
-        Assert.assertEquals(filePath,
-                LOGS_DIR + "/context/" + ENTITY_NAME + "-wf-post-exec-context.json");
-    }
-
-    @Test
-    public void testGetFilePathForFeedReplication() throws Exception {
-        final String filePath = WorkflowExecutionContext.getFilePath(LOGS_DIR,
-                ENTITY_NAME, "FEED", WorkflowExecutionContext.EntityOperations.REPLICATE);
-        Assert.assertEquals(filePath,
-                LOGS_DIR + "/" + ENTITY_NAME + "-wf-post-exec-context.json");
-        Assert.assertEquals(context.getContextFile(), filePath);
-    }
-
-    private static String[] getTestMessageArgs() {
-        long now = new Date().getTime();
-        return new String[]{
-            "-" + WorkflowExecutionArgs.CLUSTER_NAME.getName(), CLUSTER_NAME,
-            "-" + WorkflowExecutionArgs.ENTITY_TYPE.getName(), "process",
-            "-" + WorkflowExecutionArgs.ENTITY_NAME.getName(), ENTITY_NAME,
-            "-" + WorkflowExecutionArgs.NOMINAL_TIME.getName(), NOMINAL_TIME,
-            "-" + WorkflowExecutionArgs.OPERATION.getName(), OPERATION,
-
-            "-" + WorkflowExecutionArgs.INPUT_FEED_NAMES.getName(), INPUT_FEED_NAMES,
-            "-" + WorkflowExecutionArgs.INPUT_FEED_PATHS.getName(), INPUT_INSTANCE_PATHS,
-
-            "-" + WorkflowExecutionArgs.OUTPUT_FEED_NAMES.getName(), OUTPUT_FEED_NAMES,
-            "-" + WorkflowExecutionArgs.OUTPUT_FEED_PATHS.getName(), OUTPUT_INSTANCE_PATHS,
-
-            "-" + WorkflowExecutionArgs.WORKFLOW_ID.getName(), "workflow-01-00",
-            "-" + WorkflowExecutionArgs.WORKFLOW_USER.getName(), FALCON_USER,
-            "-" + WorkflowExecutionArgs.RUN_ID.getName(), "1",
-            "-" + WorkflowExecutionArgs.STATUS.getName(), "SUCCEEDED",
-            "-" + WorkflowExecutionArgs.TIMESTAMP.getName(), NOMINAL_TIME,
-
-            "-" + WorkflowExecutionArgs.WF_ENGINE_URL.getName(), "http://localhost:11000/oozie",
-            "-" + WorkflowExecutionArgs.USER_SUBFLOW_ID.getName(), "userflow@wf-id",
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_NAME.getName(), WORKFLOW_NAME,
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_VERSION.getName(), WORKFLOW_VERSION,
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_ENGINE.getName(), EngineType.PIG.name(),
-
-            "-" + WorkflowExecutionArgs.BRKR_IMPL_CLASS.getName(), BROKER,
-            "-" + WorkflowExecutionArgs.BRKR_URL.getName(), "tcp://localhost:61616?daemon=true",
-            "-" + WorkflowExecutionArgs.USER_BRKR_IMPL_CLASS.getName(), BROKER,
-            "-" + WorkflowExecutionArgs.USER_BRKR_URL.getName(), "tcp://localhost:61616?daemon=true",
-            "-" + WorkflowExecutionArgs.BRKR_TTL.getName(), "1000",
-
-            "-" + WorkflowExecutionArgs.LOG_DIR.getName(), LOGS_DIR,
-            "-" + WorkflowExecutionArgs.LOG_FILE.getName(), LOGS_DIR + "/log.txt",
-            "-" + WorkflowExecutionArgs.WF_START_TIME.getName(), Long.toString(now),
-            "-" + WorkflowExecutionArgs.WF_END_TIME.getName(), Long.toString(now + 1000000),
-        };
-    }
-}
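
The deleted test above also serves as a compact reference for how a WorkflowExecutionContext is built from the name/value pairs that Falcon's post-processing action passes on the command line. A minimal round-trip sketch, assuming the classes behave exactly as exercised by the removed test (the class name is hypothetical and the argument list is abbreviated; the full set is in getTestMessageArgs() above):

    import org.apache.falcon.workflow.WorkflowExecutionArgs;
    import org.apache.falcon.workflow.WorkflowExecutionContext;

    /** Hypothetical sketch mirroring the deleted WorkflowExecutionContextTest. */
    public class ContextRoundTripSketch {
        public static void main(String[] unused) throws Exception {
            String[] args = {
                "-" + WorkflowExecutionArgs.CLUSTER_NAME.getName(), "primary-cluster",
                "-" + WorkflowExecutionArgs.ENTITY_TYPE.getName(), "process",
                "-" + WorkflowExecutionArgs.ENTITY_NAME.getName(), "sample-process",
                "-" + WorkflowExecutionArgs.NOMINAL_TIME.getName(), "2014-01-01-01-00",
                "-" + WorkflowExecutionArgs.OPERATION.getName(), "GENERATE",
                "-" + WorkflowExecutionArgs.LOG_DIR.getName(), "target/log",
                // ... plus the remaining WorkflowExecutionArgs that getTestMessageArgs() passed ...
            };
            // Build the context the same way the deleted setUp() did.
            WorkflowExecutionContext context = WorkflowExecutionContext.create(
                    args, WorkflowExecutionContext.Type.POST_PROCESSING);
            // Serialize and read it back, as in testSerializeDeserialize();
            // the file lands at <logDir>/<entityName>-wf-post-exec-context.json.
            context.serialize();
            WorkflowExecutionContext restored =
                    WorkflowExecutionContext.deSerialize(context.getContextFile());
            System.out.println(restored.getEntityName());
        }
    }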

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/workflow/WorkflowJobEndNotificationServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/workflow/WorkflowJobEndNotificationServiceTest.java b/common/src/test/java/org/apache/falcon/workflow/WorkflowJobEndNotificationServiceTest.java
deleted file mode 100644
index 9dd8f93..0000000
--- a/common/src/test/java/org/apache/falcon/workflow/WorkflowJobEndNotificationServiceTest.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.apache.falcon.util.StartupProperties;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.util.Date;
-import java.util.Properties;
-
-/**
- * A test for WorkflowJobEndNotificationService.
- */
-public class WorkflowJobEndNotificationServiceTest implements WorkflowExecutionListener {
-
-    private static final String FALCON_USER = "falcon-user";
-    private static final String LOGS_DIR = "target/log";
-    private static final String NOMINAL_TIME = "2014-01-01-01-00";
-    private static final String OPERATION = "GENERATE";
-
-    private static final String CLUSTER_NAME = "primary-cluster";
-    private static final String ENTITY_NAME = "sample-process";
-    private static final String WORKFLOW_NAME = "imp-click-join-workflow";
-    private static final String WORKFLOW_VERSION = "1.0.9";
-
-    private static final String INPUT_FEED_NAMES = "impression-feed#clicks-feed";
-    private static final String INPUT_INSTANCE_PATHS =
-            "jail://global:00/falcon/impression-feed/2014/01/01,jail://global:00/falcon/impression-feed/2014/01/02"
-                    + "#jail://global:00/falcon/clicks-feed/2014-01-01";
-
-    private static final String OUTPUT_FEED_NAMES = "imp-click-join1,imp-click-join2";
-    private static final String OUTPUT_INSTANCE_PATHS =
-            "jail://global:00/falcon/imp-click-join1/20140101,jail://global:00/falcon/imp-click-join2/20140101";
-
-    private static final String BROKER = "org.apache.activemq.ActiveMQConnectionFactory";
-
-    private WorkflowJobEndNotificationService service;
-    private WorkflowExecutionContext savedContext;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        service = new WorkflowJobEndNotificationService();
-        savedContext = WorkflowExecutionContext.create(getTestMessageArgs(),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-        Assert.assertNotNull(savedContext);
-        service.init();
-        service.registerListener(this);
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        service.destroy();
-    }
-
-    @Test
-    public void testGetName() throws Exception {
-        Assert.assertEquals(service.getName(), WorkflowJobEndNotificationService.SERVICE_NAME);
-    }
-
-    @Test(priority = -1)
-    public void testBasic() throws Exception {
-        try {
-            notifyFailure(savedContext);
-            notifySuccess(savedContext);
-        } finally {
-            StartupProperties.get().setProperty("workflow.execution.listeners", "");
-        }
-    }
-
-    @Test
-    public void testNotificationsFromEngine() throws FalconException {
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(),
-                WorkflowExecutionContext.Type.WORKFLOW_JOB);
-
-        // Pretend the start was already notified
-        Properties wfProps = new Properties();
-        wfProps.put(WorkflowExecutionArgs.CLUSTER_NAME.name(), CLUSTER_NAME);
-        service.getContextMap().put("workflow-01-00", wfProps);
-
-        // Should retrieve from cache.
-        service.notifySuspend(context);
-    }
-
-    @Override
-    public void onSuccess(WorkflowExecutionContext context) throws FalconException {
-        Assert.assertNotNull(context);
-        Assert.assertEquals(context.entrySet().size(), 28);
-    }
-
-    @Override
-    public void onFailure(WorkflowExecutionContext context) throws FalconException {
-        Assert.assertNotNull(context);
-        Assert.assertEquals(context.entrySet().size(), 28);
-    }
-
-    @Override
-    public void onStart(WorkflowExecutionContext context) throws FalconException {
-    }
-
-    @Override
-    public void onSuspend(WorkflowExecutionContext context) throws FalconException {
-    }
-
-    @Override
-    public void onWait(WorkflowExecutionContext context) throws FalconException {
-
-    }
-
-    private void notifyFailure(WorkflowExecutionContext context) throws FalconException {
-        service.notifyFailure(context);
-    }
-
-    private void notifySuccess(WorkflowExecutionContext context) throws FalconException {
-        service.notifySuccess(context);
-    }
-
-    private static String[] getTestMessageArgs() {
-        return new String[]{
-            "-" + WorkflowExecutionArgs.CLUSTER_NAME.getName(), CLUSTER_NAME,
-            "-" + WorkflowExecutionArgs.ENTITY_TYPE.getName(), "process",
-            "-" + WorkflowExecutionArgs.ENTITY_NAME.getName(), ENTITY_NAME,
-            "-" + WorkflowExecutionArgs.NOMINAL_TIME.getName(), NOMINAL_TIME,
-            "-" + WorkflowExecutionArgs.OPERATION.getName(), OPERATION,
-
-            "-" + WorkflowExecutionArgs.INPUT_FEED_NAMES.getName(), INPUT_FEED_NAMES,
-            "-" + WorkflowExecutionArgs.INPUT_FEED_PATHS.getName(), INPUT_INSTANCE_PATHS,
-
-            "-" + WorkflowExecutionArgs.OUTPUT_FEED_NAMES.getName(), OUTPUT_FEED_NAMES,
-            "-" + WorkflowExecutionArgs.OUTPUT_FEED_PATHS.getName(), OUTPUT_INSTANCE_PATHS,
-
-            "-" + WorkflowExecutionArgs.WORKFLOW_ID.getName(), "workflow-01-00",
-            "-" + WorkflowExecutionArgs.WORKFLOW_USER.getName(), FALCON_USER,
-            "-" + WorkflowExecutionArgs.RUN_ID.getName(), "1",
-            "-" + WorkflowExecutionArgs.STATUS.getName(), "SUCCEEDED",
-            "-" + WorkflowExecutionArgs.TIMESTAMP.getName(), NOMINAL_TIME,
-
-            "-" + WorkflowExecutionArgs.WF_ENGINE_URL.getName(), "http://localhost:11000/oozie",
-            "-" + WorkflowExecutionArgs.USER_SUBFLOW_ID.getName(), "userflow@wf-id",
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_NAME.getName(), WORKFLOW_NAME,
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_VERSION.getName(), WORKFLOW_VERSION,
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_ENGINE.getName(), EngineType.PIG.name(),
-
-            "-" + WorkflowExecutionArgs.BRKR_IMPL_CLASS.getName(), BROKER,
-            "-" + WorkflowExecutionArgs.BRKR_URL.getName(), "tcp://localhost:61616?daemon=true",
-            "-" + WorkflowExecutionArgs.USER_BRKR_IMPL_CLASS.getName(), BROKER,
-            "-" + WorkflowExecutionArgs.USER_BRKR_URL.getName(), "tcp://localhost:61616?daemon=true",
-            "-" + WorkflowExecutionArgs.BRKR_TTL.getName(), "1000",
-
-            "-" + WorkflowExecutionArgs.LOG_DIR.getName(), LOGS_DIR,
-            "-" + WorkflowExecutionArgs.LOG_FILE.getName(), LOGS_DIR + "/log.txt",
-            "-" + WorkflowExecutionArgs.WF_START_TIME.getName(), Long.toString(new Date().getTime()),
-            "-" + WorkflowExecutionArgs.WF_END_TIME.getName(), Long.toString(new Date().getTime() + 1000000),
-        };
-    }
-}
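
Similarly, the deleted notification-service test doubles as a recipe for wiring a WorkflowExecutionListener into WorkflowJobEndNotificationService. A minimal sketch under the same assumption (the listener body and class name are illustrative, not part of this commit):

    import org.apache.falcon.FalconException;
    import org.apache.falcon.workflow.WorkflowExecutionContext;
    import org.apache.falcon.workflow.WorkflowExecutionListener;
    import org.apache.falcon.workflow.WorkflowJobEndNotificationService;

    /** Illustrative listener mirroring the registration flow in the deleted test. */
    public class LoggingWorkflowListener implements WorkflowExecutionListener {
        @Override public void onStart(WorkflowExecutionContext context) throws FalconException { }
        @Override public void onSuccess(WorkflowExecutionContext context) throws FalconException {
            System.out.println("succeeded: " + context.getEntityName());
        }
        @Override public void onFailure(WorkflowExecutionContext context) throws FalconException {
            System.out.println("failed: " + context.getEntityName());
        }
        @Override public void onSuspend(WorkflowExecutionContext context) throws FalconException { }
        @Override public void onWait(WorkflowExecutionContext context) throws FalconException { }

        public static void main(String[] unused) throws Exception {
            WorkflowJobEndNotificationService service = new WorkflowJobEndNotificationService();
            service.init();                                      // as in the deleted setUp()
            service.registerListener(new LoggingWorkflowListener());
            // notifySuccess(context) / notifyFailure(context) then fan out to registered listeners.
            service.destroy();
        }
    }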

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/classpath.properties
----------------------------------------------------------------------
diff --git a/common/src/test/resources/classpath.properties b/common/src/test/resources/classpath.properties
deleted file mode 100644
index aedc45d..0000000
--- a/common/src/test/resources/classpath.properties
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-*.domain=unittest
-
-unittest.test=hello world

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/cluster/cluster-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/cluster/cluster-0.1.xml b/common/src/test/resources/config/cluster/cluster-0.1.xml
deleted file mode 100644
index 5e36f72..0000000
--- a/common/src/test/resources/config/cluster/cluster-0.1.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<cluster colo="default" description="" name="testCluster" xmlns="uri:falcon:cluster:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:50010"
-                   version="0.20.2"/>
-        <interface type="write" endpoint="jail://testCluster:00"
-                   version="0.20.2"/>
-        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
-        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
-                   version="4.0"/>
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
-                   version="5.1.6"/>
-        <interface type="registry" endpoint="http://localhost:48080/templeton/v1"
-                   version="0.11.0"/>
-    </interfaces>
-    <locations>
-        <location name="staging" path="/projects/falcon/staging"/>
-        <location name="temp" path="/tmp"/>
-        <location name="working" path="/projects/falcon/working"/>
-    </locations>
-    <properties>
-        <property name="field1" value="value1"/>
-        <property name="field2" value="value2"/>
-    </properties>
-</cluster>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/cluster/cluster-bad-registry.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/cluster/cluster-bad-registry.xml b/common/src/test/resources/config/cluster/cluster-bad-registry.xml
deleted file mode 100644
index 1d15e16..0000000
--- a/common/src/test/resources/config/cluster/cluster-bad-registry.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--~
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<cluster colo="default" description="" name="testCluster" xmlns="uri:falcon:cluster:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:50010"
-                   version="0.20.2"/>
-        <interface type="write" endpoint="hdfs://localhost:8020"
-                   version="0.20.2"/>
-        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
-        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
-                   version="4.0"/>
-        <interface type="registry" endpoint="Hcat" version="0.1"/>
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
-                   version="5.1.6"/>
-    </interfaces>
-    <locations>
-        <location name="staging" path="/projects/falcon/staging"/>
-        <location name="temp" path="/tmp"/>
-        <location name="working" path="/projects/falcon/working"/>
-    </locations>
-    <properties>
-        <property name="field1" value="value1"/>
-        <property name="field2" value="value2"/>
-    </properties>
-</cluster>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/cluster/cluster-bad-write-endpoint.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/cluster/cluster-bad-write-endpoint.xml b/common/src/test/resources/config/cluster/cluster-bad-write-endpoint.xml
deleted file mode 100644
index fc709a2..0000000
--- a/common/src/test/resources/config/cluster/cluster-bad-write-endpoint.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<!--~
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<cluster colo="default" description="" name="testCluster" xmlns="uri:falcon:cluster:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="jail://testcluster:00"
-                   version="0.20.2"/>
-        <interface type="write" endpoint="hdfs://bad-end-point:8020"
-                   version="0.20.2"/>
-        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
-        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
-                   version="4.0"/>
-        <interface type="registry" endpoint="http://localhost:48080/templeton/v1"
-                   version="0.11.0"/>
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
-                   version="5.1.6"/>
-    </interfaces>
-    <locations>
-        <location name="staging" path="/projects/falcon/staging"/>
-        <location name="temp" path="/tmp"/>
-        <location name="working" path="/projects/falcon/working"/>
-    </locations>
-    <properties>
-        <property name="field1" value="value1"/>
-        <property name="field2" value="value2"/>
-    </properties>
-</cluster>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/cluster/cluster-no-messaging.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/cluster/cluster-no-messaging.xml b/common/src/test/resources/config/cluster/cluster-no-messaging.xml
deleted file mode 100644
index 93e94cb..0000000
--- a/common/src/test/resources/config/cluster/cluster-no-messaging.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<cluster colo="default" description="" name="testCluster" xmlns="uri:falcon:cluster:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:50010"
-                   version="0.20.2"/>
-        <interface type="write" endpoint="jail://testCluster:00"
-                   version="0.20.2"/>
-        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
-        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
-                   version="4.0"/>
-        <interface type="registry" endpoint="http://localhost:48080/templeton/v1"
-                   version="0.11.0"/>
-    </interfaces>
-    <locations>
-        <location name="staging" path="/projects/falcon/staging"/>
-        <location name="temp" path="/tmp"/>
-        <location name="working" path="/projects/falcon/working"/>
-    </locations>
-</cluster>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/cluster/cluster-no-registry.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/cluster/cluster-no-registry.xml b/common/src/test/resources/config/cluster/cluster-no-registry.xml
deleted file mode 100644
index d3def81..0000000
--- a/common/src/test/resources/config/cluster/cluster-no-registry.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--~
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<cluster colo="default" description="" name="testCluster" xmlns="uri:falcon:cluster:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:50010"
-                   version="0.20.2"/>
-        <interface type="write" endpoint="hdfs://localhost:8020"
-                   version="0.20.2"/>
-        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
-        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
-                   version="4.0"/>
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
-                   version="5.1.6"/>
-    </interfaces>
-    <locations>
-        <location name="staging" path="/projects/falcon/staging"/>
-        <location name="temp" path="/tmp"/>
-        <location name="working" path="/projects/falcon/working"/>
-    </locations>
-    <ACL owner="falcon-ut-user" group="falcon"/>
-    <properties>
-        <property name="field1" value="value1"/>
-        <property name="field2" value="value2"/>
-    </properties>
-</cluster>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/datasource/datasource-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/datasource/datasource-0.1.xml b/common/src/test/resources/config/datasource/datasource-0.1.xml
deleted file mode 100644
index 5b09f10..0000000
--- a/common/src/test/resources/config/datasource/datasource-0.1.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<datasource colo="west-coast" description="HSQL database on west coast" type="hsql" name="test-hsql-db" xmlns="uri:falcon:datasource:0.1">
-    <tags>owner=foobar@ambari.apache.org, consumer=phoe@ambari.apache.org</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-text">
-                <userName>SA</userName>
-                <passwordText></passwordText>
-            </credential>
-        </interface>
-
-        <interface type="write" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-text">
-                <userName>SA</userName>
-                <passwordText>sqoop</passwordText>
-            </credential>
-        </interface>
-
-        <credential type="password-text">
-            <userName>SA</userName>
-            <passwordText>sqoop</passwordText>
-        </credential>
-    </interfaces>
-
-    <driver>
-       <clazz>org.hsqldb.jdbcDriver</clazz>
-       <jar>/user/oozie/share/lib/lib_20150721010816/sqoop/hsqldb-1.8.0.7.jar</jar>
-    </driver>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-</datasource>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/datasource/datasource-file-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/datasource/datasource-file-0.1.xml b/common/src/test/resources/config/datasource/datasource-file-0.1.xml
deleted file mode 100644
index 76bf3c3..0000000
--- a/common/src/test/resources/config/datasource/datasource-file-0.1.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<datasource colo="west-coast" description="HSQL database on west coast" type="hsql" name="test-hsql-db" xmlns="uri:falcon:datasource:0.1">
-    <tags>owner=foobar@ambari.apache.org, consumer=phoe@ambari.apache.org</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-file">
-                <userName>SA</userName>
-                <passwordFile>/falcon/passwordfile</passwordFile>
-            </credential>
-        </interface>
-
-        <interface type="write" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-file">
-                <userName>SA</userName>
-                <passwordFile>/falcon/passwordfile</passwordFile>
-            </credential>
-        </interface>
-
-        <credential type="password-file">
-            <userName>SA</userName>
-            <passwordFile>/falcon/passwordfile</passwordFile>
-        </credential>
-    </interfaces>
-
-    <driver>
-       <clazz>org.hsqldb.jdbcDriver</clazz>
-       <jar>/user/oozie/share/lib/lib_20150721010816/sqoop/hsqldb-1.8.0.7.jar</jar>
-    </driver>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-</datasource>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/datasource/datasource-file-0.2.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/datasource/datasource-file-0.2.xml b/common/src/test/resources/config/datasource/datasource-file-0.2.xml
deleted file mode 100644
index 3ee40ed..0000000
--- a/common/src/test/resources/config/datasource/datasource-file-0.2.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<datasource colo="west-coast" description="HSQL database on west coast" type="hsql" name="test-hsql-db" xmlns="uri:falcon:datasource:0.1">
-    <tags>owner=foobar@ambari.apache.org, consumer=phoe@ambari.apache.org</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-file">
-                <userName>SA</userName>
-                <passwordFile>"jail://global:00/falcon/passwordfile"/></passwordFile>
-            </credential>
-        </interface>
-
-        <interface type="write" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-file">
-                <userName>SA</userName>
-                <passwordFile>"jail://global:00/falcon/passwordfile"/></passwordFile>
-            </credential>
-        </interface>
-
-        <credential type="password-file">
-            <userName>SA</userName>
-            <passwordFile>"jail://global:00/falcon/passwordfile"/></passwordFile>
-        </credential>
-    </interfaces>
-
-    <driver>
-       <clazz>org.hsqldb.jdbcDriver</clazz>
-       <jar>/user/oozie/share/lib/lib_20150721010816/sqoop/hsqldb-1.8.0.7.jar</jar>
-    </driver>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-</datasource>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/datasource/datasource-invalid-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/datasource/datasource-invalid-0.1.xml b/common/src/test/resources/config/datasource/datasource-invalid-0.1.xml
deleted file mode 100644
index 04fe737..0000000
--- a/common/src/test/resources/config/datasource/datasource-invalid-0.1.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<datasource colo="west-coast" description="A new database on west coast" type="xyz" name="test-hsql-db" xmlns="uri:falcon:datasource:0.1">
-    <tags>owner=foobar@ambari.apache.org, consumer=phoe@ambari.apache.org</tags>
-    <interfaces>
-        <interface type="readonly" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-text">
-                <userName>SA</userName>
-                <passwordText></passwordText>
-            </credential>
-        </interface>
-
-        <interface type="write" endpoint="jdbc:hsqldb:localhost/db1">
-            <credential type="password-text">
-                <userName>SA</userName>
-                <passwordText>sqoop</passwordText>
-            </credential>
-        </interface>
-
-        <credential type="password-text">
-            <userName>SA</userName>
-            <passwordText>sqoop</passwordText>
-        </credential>
-    </interfaces>
-
-    <driver>
-       <clazz>org.hsqldb.jdbcDriver</clazz>
-       <jar>/user/oozie/share/lib/lib_20150721010816/sqoop/hsqldb-1.8.0.7.jar</jar>
-    </driver>
-</datasource>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-0.1.xml b/common/src/test/resources/config/feed/feed-0.1.xml
deleted file mode 100644
index cbe50c6..0000000
--- a/common/src/test/resources/config/feed/feed-0.1.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1"
-        >
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <notification type="email" to="falcon@localhost"/>
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-
-    <properties>
-        <property name="field1" value="value1"/>
-        <property name="field2" value="value2"/>
-    </properties>
-
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-0.2.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-0.2.xml b/common/src/test/resources/config/feed/feed-0.2.xml
deleted file mode 100644
index ac137fc..0000000
--- a/common/src/test/resources/config/feed/feed-0.2.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1"
-        >
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source" partition="*/${cluster.colo}">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <locations>
-                <location type="data" path="/testCluster/projects/falcon/clicks"/>
-                <location type="stats" path="/testCluster/projects/falcon/clicksStats"/>
-                <location type="meta" path="/testCluster/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <locations>
-                <location type="data" path="/backupCluster/projects/falcon/clicks"/>
-                <location type="stats" path="/backupCluster/projects/falcon/clicksStats"/>
-                <location type="meta" path="/backupCluster/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-0.3.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-0.3.xml b/common/src/test/resources/config/feed/feed-0.3.xml
deleted file mode 100644
index e6d3e01..0000000
--- a/common/src/test/resources/config/feed/feed-0.3.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1"
-        >
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-            <lifecycle>
-                <retention-stage>
-                    <frequency>hours(10)</frequency>
-                    <queue>reports</queue>
-                    <priority>NORMAL</priority>
-                    <properties>
-                        <property name="retention.policy.agebaseddelete.limit" value="hours(9)"></property>
-                    </properties>
-                </retention-stage>
-            </lifecycle>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-    <lifecycle>
-        <retention-stage>
-            <frequency>hours(17)</frequency>
-            <queue>reports</queue>
-            <priority>NORMAL</priority>
-            <properties>
-                <property name="retention.policy.agebaseddelete.limit" value="hours(7)"></property>
-            </properties>
-        </retention-stage>
-    </lifecycle>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-0.4.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-0.4.xml b/common/src/test/resources/config/feed/feed-0.4.xml
deleted file mode 100644
index c88fb14..0000000
--- a/common/src/test/resources/config/feed/feed-0.4.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1"
-        >
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(6)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser-ut-user" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-    <lifecycle>
-        <retention-stage>
-            <frequency>hours(17)</frequency>
-            <queue>reports</queue>
-            <priority>NORMAL</priority>
-            <properties>
-                <property name="retention.policy.agebaseddelete.limit" value="hours(7)"></property>
-            </properties>
-        </retention-stage>
-    </lifecycle>
-
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-export-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-export-0.1.xml b/common/src/test/resources/config/feed/feed-export-0.1.xml
deleted file mode 100644
index d92ee17..0000000
--- a/common/src/test/resources/config/feed/feed-export-0.1.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="Customer data" name="CustomerFeed" xmlns="uri:falcon:feed:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <export>
-                <target name="test-hsql-db" tableName="customer">
-                    <load type="updateonly"/>
-                    <fields>
-                        <excludes>
-                            <field>id</field>
-                            <field>name</field>
-                        </excludes>
-                    </fields>
-                </target>
-                <arguments>
-                    <argument name="--num-mappers" value="2"/>
-                </arguments>
-            </export>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-export-exclude-fields-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-export-exclude-fields-0.1.xml b/common/src/test/resources/config/feed/feed-export-exclude-fields-0.1.xml
deleted file mode 100644
index 6753a00..0000000
--- a/common/src/test/resources/config/feed/feed-export-exclude-fields-0.1.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="Customer data" name="CustomerFeed" xmlns="uri:falcon:feed:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <export>
-                <target name="test-hsql-db" tableName="customer">
-                    <load type="updateonly"/>
-                    <fields>
-                        <excludes>
-                            <field>id</field>
-                            <field>name</field>
-                        </excludes>
-                    </fields>
-                </target>
-                <arguments>
-                    <argument name="--update-key" value="id"/>
-                </arguments>
-            </export>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-import-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-import-0.1.xml b/common/src/test/resources/config/feed/feed-import-0.1.xml
deleted file mode 100644
index 69f7ede..0000000
--- a/common/src/test/resources/config/feed/feed-import-0.1.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="Customer data" name="CustomerFeed" xmlns="uri:falcon:feed:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <import>
-                <source name="test-hsql-db" tableName="customer">
-                    <extract type="full">
-                        <mergepolicy>snapshot</mergepolicy>
-                    </extract>
-                    <fields>
-                        <includes>
-                            <field>id</field>
-                            <field>name</field>
-                        </includes>
-                    </fields>
-                </source>
-                <arguments>
-                    <argument name="--split-by" value="id"/>
-                    <argument name="--num-mappers" value="2"/>
-                </arguments>
-            </import>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-import-exclude-fields-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-import-exclude-fields-0.1.xml b/common/src/test/resources/config/feed/feed-import-exclude-fields-0.1.xml
deleted file mode 100644
index 5a6fcd9..0000000
--- a/common/src/test/resources/config/feed/feed-import-exclude-fields-0.1.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="Customer data" name="CustomerFeed" xmlns="uri:falcon:feed:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <import>
-                <source name="test-hsql-db" tableName="customer">
-                    <extract type="full">
-                        <mergepolicy>snapshot</mergepolicy>
-                    </extract>
-                    <fields>
-                        <excludes>
-                            <field>id</field>
-                            <field>name</field>
-                        </excludes>
-                    </fields>
-                </source>
-                <arguments>
-                    <argument name="--split-by" value="id"/>
-                    <argument name="--num-mappers" value="2"/>
-                </arguments>
-            </import>
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-import-invalid-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-import-invalid-0.1.xml b/common/src/test/resources/config/feed/feed-import-invalid-0.1.xml
deleted file mode 100644
index 9428bce..0000000
--- a/common/src/test/resources/config/feed/feed-import-invalid-0.1.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="Customer data" name="CustomerFeed" xmlns="uri:falcon:feed:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <import>
-                <source name="test-hsql-db" tableName="customer">
-                    <extract type="full">
-                        <mergepolicy>snapshot</mergepolicy>
-                    </extract>
-                    <fields>
-                        <includes>
-                            <field>id</field>
-                            <field>name</field>
-                        </includes>
-                    </fields>
-                </source>
-                <arguments>
-                    <argument name="--num-mappers" value="2"/>
-                </arguments>
-            </import>
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/resources/config/feed/feed-import-noargs-0.1.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/config/feed/feed-import-noargs-0.1.xml b/common/src/test/resources/config/feed/feed-import-noargs-0.1.xml
deleted file mode 100644
index c96249c..0000000
--- a/common/src/test/resources/config/feed/feed-import-noargs-0.1.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<feed description="Customer data" name="CustomerFeed" xmlns="uri:falcon:feed:0.1">
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting</tags>
-    <partitions>
-        <partition name="fraud"/>
-        <partition name="good"/>
-    </partitions>
-
-    <groups>online,bi</groups>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-
-    <frequency>hours(1)</frequency>
-    <sla slaLow="hours(2)" slaHigh="hours(3)"/>
-    <timezone>UTC</timezone>
-
-    <late-arrival cut-off="hours(6)"/>
-
-    <clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <retention limit="hours(48)" action="delete"/>
-            <!-- Limit can be in Time or Instances 100, Action ENUM DELETE,ARCHIVE -->
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <import>
-                <source name="test-hsql-db" tableName="customer">
-                    <extract type="full">
-                        <mergepolicy>snapshot</mergepolicy>
-                    </extract>
-                </source>
-            </import>
-            <locations>
-                <location type="data" path="/projects/falcon/clicks"/>
-                <location type="stats" path="/projects/falcon/clicksStats"/>
-                <location type="meta" path="/projects/falcon/clicksMetaData"/>
-            </locations>
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/falcon/clicks"/>
-        <location type="stats" path="/projects/falcon/clicksStats"/>
-        <location type="meta" path="/projects/falcon/clicksMetaData"/>
-    </locations>
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>


[08/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/SearchPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/SearchPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/SearchPage.java
deleted file mode 100644
index 15d50b0..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/SearchPage.java
+++ /dev/null
@@ -1,456 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.Keys;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.PageFactory;
-import org.testng.Assert;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Set;
-
-/** Page object for the Search Page. */
-public class SearchPage extends AbstractSearchPage {
-
-    private static final String CLASS_OF_SELECTED_ROW = "rowSelected";
-
-    private static final Logger LOGGER = Logger.getLogger(SearchPage.class);
-
-    public SearchPage(WebDriver driver) {
-        super(driver);
-    }
-
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "searchBoxContainer")
-    })
-    private WebElement searchBlock;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "dashboardBox")
-    })
-    private WebElement resultBlock;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "dashboardBox"),
-        @FindBy(tagName = "thead")
-    })
-    private WebElement resultHeader;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "dashboardBox"),
-        @FindBy(className = "buttonRow")
-    })
-    private WebElement resultButtons;
-
-    private List<WebElement> getSearchResultElements() {
-        return resultBlock.findElements(By.className("entityRow"));
-    }
-
-    public List<SearchResult> getSearchResults() {
-        List<SearchResult> searchResults = new ArrayList<>();
-        for (WebElement oneResultElement : getSearchResultElements()) {
-            final List<WebElement> resultParts = oneResultElement.findElements(By.tagName("td"));
-            final String entityName = resultParts.get(1).getText();
-            final SearchResult searchResult = SearchResult.create(entityName);
-
-            final String[] allClasses = oneResultElement.getAttribute("class").split(" ");
-            if (Arrays.asList(allClasses).contains(CLASS_OF_SELECTED_ROW)) {
-                searchResult.withChecked(true);
-            }
-
-            final String tags = resultParts.get(2).getText();
-            searchResult.withTags(tags);
-
-            final String clusterName = resultParts.get(3).getText();
-            searchResult.withClusterName(clusterName);
-
-            final String type = resultParts.get(4).getText();
-            searchResult.withType(type);
-
-            final String status = resultParts.get(5).getText();
-            searchResult.withStatus(status);
-            searchResults.add(searchResult);
-        }
-        return searchResults;
-    }
-
-
-    public EntityPage openEntityPage(String entityName) {
-        return click(doSearch(entityName).get(0));
-    }
-
-    public EntityPage click(SearchResult result) {
-        LOGGER.info("attempting to click: " + result + " on search page.");
-        for (WebElement oneResultElement : getSearchResultElements()) {
-            final List<WebElement> resultParts = oneResultElement.findElements(By.tagName("td"));
-            final WebElement entityNameElement = resultParts.get(1);
-            final String entityName = entityNameElement.getText();
-            if (entityName.equals(result.getEntityName())) {
-                entityNameElement.findElement(By.tagName("button")).click();
-                return PageFactory.initElements(driver, EntityPage.class);
-            }
-        }
-        return null;
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(searchBlock, "Search box");
-    }
-
-    private WebElement getSearchBox() {
-        return searchBlock.findElement(By.className("input"));
-    }
-
-    public List<SearchResult> doSearch(String searchString) {
-        clearSearch();
-        return appendAndSearch(searchString);
-    }
-
-    public List<SearchResult> appendAndSearch(String appendedPart) {
-        for (String queryParam : appendedPart.split("\\s+")) {
-            focusOnSearchBox();
-            getSearchBox().sendKeys(queryParam);
-            getSearchBox().sendKeys(Keys.ENTER);
-        }
-        waitForAngularToFinish();
-        if (resultBlock.isDisplayed()) {
-            return getSearchResults();
-        } else {
-            return Collections.emptyList();
-        }
-
-    }
-
-    public SearchQuery getSearchQuery() {
-        return new SearchQuery(searchBlock);
-    }
-
-    public void clearSearch() {
-        focusOnSearchBox();
-        getSearchBox().clear();
-        SearchQuery query = getSearchQuery();
-        for (int i = 0; i < query.getElementsNumber(); i++) {
-            removeLastParam();
-        }
-    }
-
-    public void removeLastParam() {
-        focusOnSearchBox();
-        getSearchBox().sendKeys(Keys.BACK_SPACE);
-        getSearchBox().sendKeys(Keys.BACK_SPACE);
-    }
-
-    private void focusOnSearchBox() {
-        driver.findElement(By.className("tags")).click();
-    }
-
-    public void checkNoResult() {
-        UIAssert.assertNotDisplayed(resultBlock, "Search result block");
-    }
-
-    public void selectRow(int row) {
-        changeRowClickedStatus(row, true);
-    }
-
-    public void deselectRow(int row) {
-        changeRowClickedStatus(row, false);
-    }
-
-    private void changeRowClickedStatus(int row, boolean checked) {
-        WebElement checkboxBlock = resultBlock.findElements(By.className("entityRow")).get(row - 1);
-        if (checked != checkboxBlock.getAttribute("class").contains(CLASS_OF_SELECTED_ROW)) {
-            checkboxBlock.findElement(By.xpath("./td/input")).click();
-        }
-    }
-
-    public void clickSelectAll() {
-        resultBlock.findElement(By.xpath(".//input[@ng-model='selectedAll']")).click();
-    }
-
-
-    /** Class representing search query displayed in the search box. */
-    public static final class SearchQuery {
-        private WebElement searchBlock;
-        private String name;
-        private String type;
-        private int elementsNumber;
-        private final List<String> tags = new ArrayList<>();
-        private static final Logger LOGGER = Logger.getLogger(SearchQuery.class);
-
-        public SearchQuery(WebElement searchBlock) {
-            this.searchBlock = searchBlock;
-            updateElements();
-        }
-
-        private SearchQuery updateElements() {
-            name = null;
-            tags.clear();
-            final WebElement queryGroup = searchBlock.findElement(By.className("tag-list"));
-            final List<WebElement> queryParts = queryGroup.findElements(By.tagName("li"));
-            elementsNumber = queryParts.size();
-            for (WebElement queryPart : queryParts) {
-                final WebElement queryLabel = queryPart.findElement(By.tagName("strong"));
-                final String queryText = queryPart.findElement(By.tagName("span")).getText();
-                switch (queryLabel.getText().trim()) {
-                case "Name:":
-                    if (name != null) {
-                        LOGGER.warn(String.format("NAME block is already added: '%s' => '%s'",
-                            name, queryText));
-                    }
-                    name = queryText;
-                    break;
-                case "Tag:":
-                    tags.add(queryText);
-                    break;
-                default:
-                }
-            }
-            return this;
-        }
-
-
-        public String getName() {
-            return name;
-        }
-
-        public List<String> getTags() {
-            return tags;
-        }
-
-        public int getElementsNumber() {
-            return elementsNumber;
-        }
-
-        /**
-         * Delete element by index (1, 2, 3,..).
-         * @param index of element in search query.
-         * @return true if deletion was successful
-         */
-        public boolean deleteByIndex(int index) {
-            if (index > elementsNumber || index < 1) {
-                LOGGER.warn("There is no element with index=" + index);
-                return false;
-            }
-            int oldElementsNumber = elementsNumber;
-            final WebElement queryGroup = searchBlock.findElement(By.className("tag-list"));
-            final List<WebElement> queryParts = queryGroup.findElements(By.tagName("li"));
-            queryParts.get(index - 1).findElement(By.className("remove-button")).click();
-            this.updateElements();
-            boolean result = oldElementsNumber == elementsNumber + 1;
-            LOGGER.info(String.format(
-                "Element with index=%d was%s deleted", index, result ? "" : "n't"));
-            return result;
-        }
-
-        public boolean deleteLast() {
-            return deleteByIndex(elementsNumber);
-        }
-    }
-
-    public Set<Button> getButtons(boolean active) {
-        List<WebElement> buttons = resultBlock.findElement(By.className("buttonsRow"))
-            .findElements(By.className("btn"));
-        Set<Button> result = EnumSet.noneOf(Button.class);
-        for (WebElement button : buttons) {
-            if ((button.getAttribute("disabled") == null) == active) {
-                result.add(Button.valueOf(button.getText()));
-            }
-        }
-        return result;
-    }
-
-    public void clickButton(Button button) {
-        resultBlock.findElement(By.className("buttonsRow"))
-            .findElements(By.className("btn")).get(button.ordinal()).click();
-        waitForAngularToFinish();
-    }
-
-    /**
-     * Buttons available for entities in result box.
-     */
-    public enum Button {
-        Schedule,
-        Resume,
-        Suspend,
-        Edit,
-        Copy,
-        Delete,
-        XML
-    }
-
-    /** Class representing search result displayed on the entity table page. */
-    public static final class SearchResult {
-        private boolean isChecked = false;
-        private String entityName;
-        private String tags = "";
-        private String clusterName;
-        private String type;
-        private EntityStatus status;
-
-        public static SearchResult create(String entityName) {
-            return new SearchResult(entityName);
-        }
-
-        public SearchResult withChecked(boolean pIsChecked) {
-            this.isChecked = pIsChecked;
-            return this;
-        }
-
-        private SearchResult(String entityName) {
-            this.entityName = entityName;
-        }
-
-        public SearchResult withTags(String pTags) {
-            this.tags = pTags;
-            return this;
-        }
-
-        public SearchResult withClusterName(String pClusterName) {
-            this.clusterName = pClusterName;
-            return this;
-        }
-
-        public SearchResult withType(String pType) {
-            this.type = pType;
-            return this;
-        }
-
-        public SearchResult withStatus(String pStatus) {
-            this.status = EntityStatus.valueOf(pStatus);
-            return this;
-        }
-
-        public boolean isChecked() {
-            return isChecked;
-        }
-
-        public String getEntityName() {
-            return entityName;
-        }
-
-        public String getTags() {
-            return tags;
-        }
-
-        public String getClusterName() {
-            Assert.assertFalse(clusterName.contains(","), "getClusterName() called"
-                + " in multi-cluster setup: " + clusterName + ", maybe use getClusterNames()");
-            return clusterName;
-        }
-
-        public List<String> getClusterNames() {
-            return Arrays.asList(clusterName.split(","));
-        }
-
-
-        public String getType() {
-            return type;
-        }
-
-        public EntityStatus getStatus() {
-            return status;
-        }
-
-        @Override
-        public String toString() {
-            return "SearchResult{"
-                + "isChecked=" + isChecked
-                + ", entityName='" + entityName + '\''
-                + ", tags='" + tags + '\''
-                + ", clusterName='" + clusterName + '\''
-                + ", type='" + type + '\''
-                + ", status='" + status + '\''
-                + '}';
-        }
-
-        public static void assertEqual(List<SearchResult> searchResults,
-                                       List<Entity> expectedEntities, String errorMessage) {
-            Assert.assertEquals(searchResults.size(), expectedEntities.size(), errorMessage
-                + "(Length of lists don't match, searchResults: " + searchResults
-                + " expectedEntities: " + expectedEntities + ")");
-            for (Entity entity : expectedEntities) {
-                boolean found = false;
-                for (SearchResult result : searchResults) {
-                    //entities are same if they have same name & type
-                    if (entity.getName().equals(result.entityName)) {
-                        //entity type in SearchResults has a different meaning
-                        //so, not comparing entity types
-
-                        //equality of cluster names
-                        List<String> entityClusters = null;
-                        switch (entity.getEntityType()) {
-                        case FEED:
-                            final FeedMerlin feed = (FeedMerlin) entity;
-                            entityClusters = feed.getClusterNames();
-                            // tags equality check
-                            Assert.assertEquals(result.getTags(),
-                                StringUtils.trimToEmpty(feed.getTags()),
-                                errorMessage + "(tags mismatch: " + result.entityName
-                                    + " & " + entity.toShortString() + ")");
-                            break;
-                        case PROCESS:
-                            final ProcessMerlin process = (ProcessMerlin) entity;
-                            entityClusters = process.getClusterNames();
-                            // tags equality check
-                            Assert.assertEquals(result.getTags(),
-                                StringUtils.trimToEmpty(process.getTags()),
-                                errorMessage + "(tags mismatch: " + result.entityName
-                                    + " & " + entity.toShortString() + ")");
-                            break;
-                        default:
-                            Assert.fail("Cluster entity is unexpected: " + entity);
-                            break;
-                        }
-                        Collections.sort(entityClusters);
-                        final List<String> actualClusters = result.getClusterNames();
-                        Collections.sort(actualClusters);
-                        Assert.assertEquals(actualClusters, entityClusters, errorMessage
-                            + "(cluster names mismatch: " + result + " " + entity + ")");
-                        found = true;
-                    }
-                }
-                Assert.assertTrue(found,
-                    "Entity: " + entity.toShortString() + " not found in: " + searchResults);
-            }
-        }
-
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/resources/errorMapping.properties
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/resources/errorMapping.properties b/falcon-regression/merlin/src/main/resources/errorMapping.properties
deleted file mode 100644
index a6adf51..0000000
--- a/falcon-regression/merlin/src/main/resources/errorMapping.properties
+++ /dev/null
@@ -1,26 +0,0 @@
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-NoRetrySpecified.xml=javax.xml.bind.UnmarshalException - with linked exception:[org.xml.sax.SAXParseException; lineNumber: 54; columnNumber: 67; cvc-complex-type.2.4.a: Invalid content was found starting with element 'late-process'. One of '{retry}' is expected.]
-noConcurrencyParam.xml=javax.xml.bind.UnmarshalException - with linked exception:[org.xml.sax.SAXParseException; lineNumber: 29; columnNumber: 16; cvc-complex-type.2.4.a: Invalid content was found starting with element 'execution'. One of '{concurrency}' is expected.]
-noExecutionSpecified.xml=javax.xml.bind.UnmarshalException - with linked exception:[org.xml.sax.SAXParseException; lineNumber: 29; columnNumber: 16; cvc-complex-type.2.4.a: Invalid content was found starting with element 'frequency'. One of '{execution}' is expected.]
-NoWorkflowParams.xml=javax.xml.bind.UnmarshalException - with linked exception:[org.xml.sax.SAXParseException; lineNumber: 52; columnNumber: 71; cvc-complex-type.2.4.a: Invalid content was found starting with element 'retry'. One of '{workflow}' is expected.]
-process-invalid.xml=javax.xml.bind.UnmarshalException - with linked exception:[org.xml.sax.SAXParseException; lineNumber: 2; columnNumber: 72; cvc-elt.1: Cannot find the declaration of element 'Process'.]
-inValid01_sameName.xml=inValid01_sameName already exists
-inValid02_sameName.xml=inValid02_sameName already exists
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/resources/log4testng.properties
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/resources/log4testng.properties b/falcon-regression/merlin/src/main/resources/log4testng.properties
deleted file mode 100644
index 34b4bde..0000000
--- a/falcon-regression/merlin/src/main/resources/log4testng.properties
+++ /dev/null
@@ -1,29 +0,0 @@
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# log4testng will log its own behavior (generally used for debugging this package only).
-log4testng.debug=false
-
-# Specifies the root Logger's logging level. Will log DEBUG level and above.
-log4testng.rootLogger=DEBUG
-
-# The org.testng.reporters.EmailableReporter Logger will log TRACE level and above.
-log4testng.logger.org.testng.reporters.EmailableReporter=TRACE
-
-# All Loggers in packages below org.testng will log INFO level and above.
-log4testng.logger.org.testng=INFO
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/AuthorizationTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/AuthorizationTest.java
deleted file mode 100644
index 714a21f..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/AuthorizationTest.java
+++ /dev/null
@@ -1,772 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.commons.httpclient.HttpStatus;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.oozie.client.BundleJob;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.CoordinatorJob;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-import org.apache.log4j.Logger;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.List;
-
-/**
- * Tests for authorization in Falcon.
- */
-@Test(groups = "embedded")
-public class AuthorizationTest extends BaseTestClass {
-    private static final Logger LOGGER = Logger.getLogger(AuthorizationTest.class);
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private String feedInputPath = baseTestDir + "/input" + MINUTE_DATE_PATTERN;
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        Bundle bundle = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundle, cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-    }
-
-    /**
-     * U2Delete test cases.
-     */
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SubmitU2DeleteCluster() throws Exception {
-        bundles[0].submitClusters(prism);
-        final ServiceResponse serviceResponse = prism.getClusterHelper().delete(
-            bundles[0].getClusters().get(0), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Entity submitted by first user should not be deletable by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SubmitU2DeleteProcess() throws Exception {
-        bundles[0].submitClusters(prism);
-        bundles[0].submitProcess(true);
-        final ServiceResponse serviceResponse = prism.getProcessHelper().delete(
-            bundles[0].getProcessData(), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Entity submitted by first user should not be deletable by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SubmitU2DeleteFeed() throws Exception {
-        bundles[0].submitClusters(prism);
-        bundles[0].submitFeed();
-        final ServiceResponse serviceResponse = prism.getFeedHelper().delete(
-            bundles[0].getDataSets().get(0), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Entity submitted by first user should not be deletable by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2DeleteProcess() throws Exception {
-        //submit, schedule process by U1
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(),
-            Job.Status.RUNNING);
-        //try to delete process by U2
-        final ServiceResponse serviceResponse = prism.getProcessHelper()
-            .delete(bundles[0].getProcessData(), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Process scheduled by first user should not be deleted by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2DeleteFeed() throws Exception {
-        String feed = bundles[0].getInputFeedFromBundle();
-        //submit, schedule feed by U1
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-        //delete feed by U2
-        final ServiceResponse serviceResponse = prism.getFeedHelper().delete(feed, MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Feed scheduled by first user should not be deleted by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SuspendU2DeleteProcess() throws Exception {
-        //submit, schedule, suspend process by U1
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(),
-            Job.Status.RUNNING);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().suspend(bundles[0].getProcessData()));
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(),
-            Job.Status.SUSPENDED);
-        //try to delete process by U2
-        final ServiceResponse serviceResponse = prism.getProcessHelper()
-            .delete(bundles[0].getProcessData(), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Process suspended by first user should not be deleted by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SuspendU2DeleteFeed() throws Exception {
-        String feed = bundles[0].getInputFeedFromBundle();
-        //submit, schedule, suspend feed by U1
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().suspend(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-        //delete feed by U2
-        final ServiceResponse serviceResponse = prism.getFeedHelper()
-            .delete(feed, MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Feed scheduled by first user should not be deleted by second user");
-    }
-
-    /**
-     * U2Suspend test cases.
-     */
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2SuspendFeed() throws Exception {
-        String feed = bundles[0].getInputFeedFromBundle();
-        //submit, schedule by U1
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-        //try to suspend by U2
-        final ServiceResponse serviceResponse = prism.getFeedHelper()
-            .suspend(feed, MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Feed scheduled by first user should not be suspended by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2SuspendProcess() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(),
-            Job.Status.RUNNING);
-        //try to suspend process by U2
-        final ServiceResponse serviceResponse = prism.getProcessHelper()
-            .suspend(bundles[0].getProcessData(), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Process scheduled by first user should not be suspended by second user");
-    }
-
-    /**
-     * U2Resume test cases.
-     */
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SuspendU2ResumeFeed() throws Exception {
-        String feed = bundles[0].getInputFeedFromBundle();
-        //submit, schedule and then suspend feed by User1
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().suspend(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-        //try to resume feed by User2
-        final ServiceResponse serviceResponse = prism.getFeedHelper()
-            .resume(feed, MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Feed suspended by first user should not be resumed by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SuspendU2ResumeProcess() throws Exception {
-        //submit, schedule, suspend process by U1
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().suspend(bundles[0].getProcessData()));
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(),
-            Job.Status.SUSPENDED);
-        //try to resume process by U2
-        final ServiceResponse serviceResponse = prism.getProcessHelper()
-            .resume(bundles[0].getProcessData(), MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Process suspended by first user should not be resumed by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SuspendU2ResumeProcessInstances() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        String midTime = TimeUtil.addMinsToTime(startTime, 2);
-        LOGGER.info("Start time: " + startTime + "\tEnd time: " + endTime);
-
-        //prepare process definition
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].setInputFeedPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessInput("now(0,0)", "now(0,4)");
-
-        //provide necessary data for first 3 instances to run
-        LOGGER.info("Creating necessary data...");
-        String prefix = bundles[0].getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(
-            TimeUtil.addMinsToTime(startTime, -2), endTime, 0);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT, prefix, dataDates);
-
-        //submit, schedule process by U1
-        LOGGER.info("Process data: " + Util.prettyPrintXml(bundles[0].getProcessData()));
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        //check that there are 3 running instances
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS);
-
-        //check that there are 2 waiting instances
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 2,
-                CoordinatorAction.Status.WAITING, EntityType.PROCESS);
-
-        //3 instances should be running, the other 2 should be waiting
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime);
-        InstanceUtil.validateResponse(r, 5, 3, 0, 2, 0);
-
-        //suspend 3 running instances
-        r = prism.getProcessHelper().getProcessInstanceSuspend(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + midTime);
-        InstanceUtil.validateResponse(r, 3, 0, 3, 0, 0);
-
-        //try to resume suspended instances by U2
-        r = prism.getProcessHelper().getProcessInstanceResume(bundles[0].getProcessName(), "?start=" + startTime
-                + "&end=" + midTime, MerlinConstants.USER2_NAME);
-
-        //the state of above 3 instances should still be suspended
-        InstanceUtil.validateResponse(r, 3, 0, 3, 0, 0);
-
-        //check the status of all instances
-        r = prism.getProcessHelper().getProcessInstanceStatus(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime);
-        InstanceUtil.validateResponse(r, 5, 0, 3, 2, 0);
-    }
-
-    /**
-     * U2Kill test cases.
-     */
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2KillProcessInstances() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Start time: " + startTime + "\tEnd time: " + endTime);
-
-        //prepare process definition
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].setInputFeedPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessInput("now(0,0)", "now(0,4)");
-
-        //provide necessary data for first 3 instances to run
-        LOGGER.info("Creating necessary data...");
-        String prefix = bundles[0].getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(
-            TimeUtil.addMinsToTime(startTime, -2), endTime, 0);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT, prefix, dataDates);
-
-        //submit, schedule process by U1
-        LOGGER.info("Process data: " + Util.prettyPrintXml(bundles[0].getProcessData()));
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        //check that there are 3 running instances
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS);
-
-        //3 instances should be running, the other 2 should be waiting
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime);
-        InstanceUtil.validateResponse(r, 5, 3, 0, 2, 0);
-
-        //try to kill all instances by U2
-        r = prism.getProcessHelper().getProcessInstanceKill(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime, MerlinConstants.USER2_NAME);
-
-        //number of instances should be the same as before
-        InstanceUtil.validateResponse(r, 5, 3, 0, 2, 0);
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SuspendU2KillProcessInstances() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        String midTime = TimeUtil.addMinsToTime(startTime, 2);
-        LOGGER.info("Start time: " + startTime + "\tEnd time: " + endTime);
-
-        //prepare process definition
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].setInputFeedPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessInput("now(0,0)", "now(0,4)");
-
-        //provide necessary data for first 3 instances to run
-        LOGGER.info("Creating necessary data...");
-        String prefix = bundles[0].getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(
-            TimeUtil.addMinsToTime(startTime, -2), endTime, 0);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT, prefix, dataDates);
-
-        //submit, schedule process by U1
-        LOGGER.info("Process data: " + Util.prettyPrintXml(bundles[0].getProcessData()));
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        //check that there are 3 running instances
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS);
-
-        //check that there are 2 waiting instances
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 2,
-                CoordinatorAction.Status.WAITING, EntityType.PROCESS);
-
-        //3 instances should be running, the other 2 should be waiting
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime);
-        InstanceUtil.validateResponse(r, 5, 3, 0, 2, 0);
-
-        //suspend 3 running instances
-        r = prism.getProcessHelper().getProcessInstanceSuspend(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + midTime);
-        InstanceUtil.validateResponse(r, 3, 0, 3, 0, 0);
-
-        //try to kill all instances by U2
-        r = prism.getProcessHelper().getProcessInstanceKill(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime, MerlinConstants.USER2_NAME);
-
-        //3 should still be suspended, 2 should be waiting
-        InstanceUtil.validateResponse(r, 5, 0, 3, 2, 0);
-    }
-
-    /**
-     * U2Rerun test cases.
-     */
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1KillSomeU2RerunAllProcessInstances()
-        throws IOException, JAXBException, AuthenticationException, URISyntaxException,
-        OozieClientException, InterruptedException {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        String midTime = TimeUtil.addMinsToTime(startTime, 2);
-        LOGGER.info("Start time: " + startTime + "\tEnd time: " + endTime);
-
-        //prepare process definition
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].setInputFeedPeriodicity(1, Frequency.TimeUnit.minutes);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessInput("now(0,0)", "now(0,3)");
-
-        //provide necessary data for first 4 instances to run
-        LOGGER.info("Creating necessary data...");
-        String prefix = bundles[0].getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(
-            TimeUtil.addMinsToTime(startTime, -2), endTime, 0);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT, prefix, dataDates);
-
-        //submit, schedule process by U1
-        LOGGER.info("Process data: " + Util.prettyPrintXml(bundles[0].getProcessData()));
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        //check that there are 4 running instances
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 4,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS);
-
-        //4 instances should be running, 1 should be waiting
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(bundles[0].getProcessName(),
-            "?start=" + startTime + "&end=" + endTime);
-        InstanceUtil.validateResponse(r, 5, 4, 0, 1, 0);
-
-        //kill 3 running instances
-        r = prism.getProcessHelper().getProcessInstanceKill(bundles[0].getProcessName(),
-                "?start=" + startTime + "&end=" + midTime);
-        InstanceUtil.validateResponse(r, 3, 0, 0, 0, 3);
-
-        //at this point 3 instances should be killed, 1 running and 1 waiting
-
-        //try to rerun instances by U2
-        r = prism.getProcessHelper().getProcessInstanceRerun(bundles[0].getProcessName(),
-                "?start=" + startTime + "&end=" + midTime, MerlinConstants.USER2_NAME);
-
-        //instances should still be killed
-        InstanceUtil.validateResponse(r, 3, 0, 0, 0, 3);
-    }
-
-    /**
-     * U2Update test cases.
-     */
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SubmitU2UpdateFeed()
-        throws URISyntaxException, IOException, AuthenticationException, JAXBException,
-        InterruptedException {
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        //submit feed
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feed.toString()));
-        String definition = prism.getFeedHelper().getEntityDefinition(feed.toString()).getMessage();
-        Assert.assertTrue(definition.contains(feed.getName()) && !definition.contains("(feed) not found"),
-            "Feed should be already submitted");
-        //update feed definition
-        FeedMerlin newFeed = new FeedMerlin(feed);
-        newFeed.setFeedPathValue(baseTestDir + "/randomPath" + MINUTE_DATE_PATTERN);
-        //try to update feed by U2
-        final ServiceResponse serviceResponse = prism.getFeedHelper().update(feed.toString(), newFeed.toString(),
-            MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Feed submitted by first user should not be updated by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2UpdateFeed() throws Exception {
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        //submit and schedule feed
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed.toString(), Job.Status.RUNNING);
-        //update feed definition
-        FeedMerlin newFeed = new FeedMerlin(feed);
-        newFeed.setFeedPathValue(baseTestDir + "/randomPath" + MINUTE_DATE_PATTERN);
-        //try to update feed by U2
-        final ServiceResponse serviceResponse = prism.getFeedHelper().update(feed.toString(), newFeed.toString(),
-            MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Feed scheduled by first user should not be updated by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1SubmitU2UpdateProcess() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        String processName = bundles[0].getProcessName();
-        //submit process
-        bundles[0].submitBundle(prism);
-        String definition = prism.getProcessHelper()
-            .getEntityDefinition(bundles[0].getProcessData()).getMessage();
-        Assert.assertTrue(definition.contains(processName)
-            && !definition.contains("(process) not found"), "Process should be already submitted");
-        //update process definition
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2020-01-02T01:04Z");
-        //try to update process by U2
-        final ServiceResponse serviceResponse = prism.getProcessHelper().update(bundles[0]
-                .getProcessData(), bundles[0].getProcessData(),
-            MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Process submitted by first user should not be updated by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleU2UpdateProcess() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        //submit, schedule process by U1
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(),
-            Job.Status.RUNNING);
-        //update process definition
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2020-01-02T01:04Z");
-        //try to update process by U2
-        final ServiceResponse serviceResponse = prism.getProcessHelper().update(bundles[0]
-                .getProcessData(), bundles[0].getProcessData(),
-            MerlinConstants.USER2_NAME);
-        AssertUtil.assertFailedWithStatus(serviceResponse, HttpStatus.SC_BAD_REQUEST,
-            "Process scheduled by first user should not be updated by second user");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleFeedU2ScheduleDependantProcessU1UpdateFeed() throws Exception {
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2099-01-02T01:00Z");
-        //submit both feeds
-        bundles[0].submitClusters(prism);
-        bundles[0].submitFeeds(prism);
-        //schedule input feed by U1
-        AssertUtil.assertSucceeded(prism.getFeedHelper().schedule(feed.toString()));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed.toString(), Job.Status.RUNNING);
-
-        //by U2 schedule a process dependent on the feed scheduled by U1
-        ServiceResponse serviceResponse = prism.getProcessHelper()
-            .submitAndSchedule(bundles[0].getProcessData(), MerlinConstants.USER2_NAME, "");
-        AssertUtil.assertSucceeded(serviceResponse);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(), Job.Status.RUNNING);
-
-        //get old process details
-        String oldProcessBundleId = OozieUtil
-            .getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        String oldProcessUser = getBundleUser(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-
-        //get old feed details
-        String oldFeedBundleId = OozieUtil.getLatestBundleID(clusterOC, feed.getName(), EntityType.FEED);
-        String oldFeedUser = getBundleUser(clusterOC, feed.getName(), EntityType.FEED);
-
-        //update feed definition
-        FeedMerlin newFeed = new FeedMerlin(feed);
-        newFeed.setFeedPathValue(baseTestDir + "/randomPath" + MINUTE_DATE_PATTERN);
-
-        //update feed by U1
-        serviceResponse = prism.getFeedHelper().update(feed.toString(), newFeed.toString(),
-            MerlinConstants.CURRENT_USER_NAME);
-        AssertUtil.assertSucceeded(serviceResponse);
-
-        //new feed bundle should be created by U1
-        OozieUtil.verifyNewBundleCreation(clusterOC, oldFeedBundleId, null, newFeed.toString(), true, false);
-        String newFeedUser = getBundleUser(clusterOC, newFeed.getName(), EntityType.FEED);
-        Assert.assertEquals(oldFeedUser, newFeedUser, "User should be the same");
-
-        //new process bundle should be created by U2
-        OozieUtil.verifyNewBundleCreation(
-            clusterOC, oldProcessBundleId, null, bundles[0].getProcessData(), true, false);
-        String newProcessUser =
-            getBundleUser(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        Assert.assertEquals(oldProcessUser, newProcessUser, "User should be the same");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleFeedU2ScheduleDependantProcessU2UpdateFeed() throws Exception {
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2099-01-02T01:00Z");
-        //submit both feeds
-        bundles[0].submitClusters(prism);
-        bundles[0].submitFeeds(prism);
-        //schedule input feed by U1
-        AssertUtil.assertSucceeded(prism.getFeedHelper().schedule(feed.toString()));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed.toString(), Job.Status.RUNNING);
-
-        //by U2 schedule a process dependent on the feed scheduled by U1
-        ServiceResponse serviceResponse = prism.getProcessHelper().submitAndSchedule(bundles[0].getProcessData(),
-                MerlinConstants.USER2_NAME, "");
-        AssertUtil.assertSucceeded(serviceResponse);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(), Job.Status.RUNNING);
-
-        //update feed definition
-        FeedMerlin newFeed = new FeedMerlin(feed);
-        newFeed.setFeedPathValue(baseTestDir + "/randomPath" + MINUTE_DATE_PATTERN);
-
-        //get old process details
-        String oldProcessBundleId = OozieUtil
-                .getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        String oldProcessUser = getBundleUser(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-
-        //get old feed details
-        String oldFeedBundleId = OozieUtil.getLatestBundleID(clusterOC, feed.getName(), EntityType.FEED);
-        String oldFeedUser = getBundleUser(clusterOC, feed.getName(), EntityType.FEED);
-
-        //update feed by U2
-        serviceResponse = prism.getFeedHelper().update(feed.toString(), newFeed.toString(),
-            MerlinConstants.USER2_NAME);
-        AssertUtil.assertSucceeded(serviceResponse);
-
-        //new feed bundle should be created by U2
-        OozieUtil.verifyNewBundleCreation(clusterOC, oldFeedBundleId, null, newFeed.toString(), true, false);
-        String newFeedUser = getBundleUser(clusterOC, newFeed.getName(), EntityType.FEED);
-        Assert.assertNotEquals(oldFeedUser, newFeedUser, "User should not be the same");
-        Assert.assertEquals(MerlinConstants.USER2_NAME, newFeedUser);
-
-        //new process bundle should be created by U2
-        OozieUtil.verifyNewBundleCreation(
-            clusterOC, oldProcessBundleId, null, bundles[0].getProcessData(), true, false);
-        String newProcessUser = getBundleUser(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        Assert.assertEquals(oldProcessUser, newProcessUser, "User should be the same");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleFeedU1ScheduleDependantProcessU1UpdateProcess() throws Exception {
-        String feed = bundles[0].getInputFeedFromBundle();
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2099-01-02T01:00Z");
-        //submit both feeds
-        bundles[0].submitClusters(prism);
-        bundles[0].submitFeeds(prism);
-        //schedule input feed by U1
-        AssertUtil.assertSucceeded(prism.getFeedHelper().schedule(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-
-        //by U1 schedule a process dependent on the feed scheduled by U1
-        ServiceResponse serviceResponse = prism.getProcessHelper().submitAndSchedule(bundles[0].getProcessData());
-        AssertUtil.assertSucceeded(serviceResponse);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(), Job.Status.RUNNING);
-
-        //get old process details
-        String oldProcessBundleId = OozieUtil
-                .getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        String oldProcessUser = getBundleUser(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-
-        //get old feed details
-        String oldFeedBundleId = OozieUtil.getLatestBundleID(clusterOC, Util.readEntityName(feed), EntityType.FEED);
-
-        //update process by U1
-        ProcessMerlin processObj = bundles[0].getProcessObject().withProperty("randomProp", "randomVal");
-        serviceResponse = prism.getProcessHelper().update(bundles[0].getProcessData(), processObj.toString());
-        AssertUtil.assertSucceeded(serviceResponse);
-
-        //new feed bundle should not be created
-        OozieUtil.verifyNewBundleCreation(clusterOC, oldFeedBundleId, null, feed, false, false);
-
-        //new process bundle should be created by U1
-        OozieUtil.verifyNewBundleCreation(
-            clusterOC, oldProcessBundleId, null, bundles[0].getProcessData(), true, false);
-        String newProcessUser = getBundleUser(clusterOC, processObj.getName(), EntityType.PROCESS);
-        Assert.assertEquals(oldProcessUser, newProcessUser, "User should be the same");
-    }
-
-    //disabled since Falcon does not have authorization:
-    //https://issues.apache.org/jira/browse/FALCON-388
-    @Test(enabled = false)
-    public void u1ScheduleFeedU1ScheduleDependantProcessU2UpdateProcess() throws Exception {
-        String feed = bundles[0].getInputFeedFromBundle();
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2099-01-02T01:00Z");
-        //submit both feeds
-        bundles[0].submitClusters(prism);
-        bundles[0].submitFeeds(prism);
-        //schedule input feed by U1
-        AssertUtil.assertSucceeded(prism.getFeedHelper().schedule(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-
-        //by U1 schedule a process dependent on the feed scheduled by U1
-        ServiceResponse serviceResponse = prism.getProcessHelper().submitAndSchedule(bundles[0].getProcessData());
-        AssertUtil.assertSucceeded(serviceResponse);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0].getProcessData(), Job.Status.RUNNING);
-
-        //get old process details
-        String oldProcessBundleId = OozieUtil
-                .getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        String oldProcessUser = getBundleUser(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-
-        //get old feed details
-        String oldFeedBundleId = OozieUtil.getLatestBundleID(clusterOC, Util.readEntityName(feed), EntityType.FEED);
-
-        //update process by U2
-        ProcessMerlin processObj = bundles[0].getProcessObject().withProperty("randomProp", "randomVal");
-        serviceResponse = prism.getProcessHelper().update(bundles[0].getProcessData(), processObj.toString(),
-            MerlinConstants.USER2_NAME);
-        AssertUtil.assertSucceeded(serviceResponse);
-
-        //new feed bundle should not be created
-        OozieUtil.verifyNewBundleCreation(clusterOC, oldFeedBundleId, null, feed, false, false);
-
-        //new process bundle should be created by U2
-        OozieUtil.verifyNewBundleCreation(
-            clusterOC, oldProcessBundleId, null, bundles[0].getProcessData(), true, false);
-        String newProcessUser = getBundleUser(clusterOC, processObj.getName(), EntityType.PROCESS);
-        Assert.assertNotEquals(oldProcessUser, newProcessUser, "User should not be the same");
-        Assert.assertEquals(MerlinConstants.USER2_NAME, newProcessUser);
-    }
-
-    private String getBundleUser(OozieClient oozieClient, String entityName, EntityType entityType)
-        throws OozieClientException {
-        String newBundleId = OozieUtil.getLatestBundleID(oozieClient, entityName, entityType);
-        BundleJob newBundleJob = oozieClient.getBundleJobInfo(newBundleId);
-        CoordinatorJob coordinatorJob = null;
-        for (CoordinatorJob coord : newBundleJob.getCoordinators()) {
-            if ((entityType == EntityType.PROCESS && coord.getAppName().contains("DEFAULT"))
-                || (entityType == EntityType.FEED && coord.getAppName().contains("RETENTION"))) {
-                coordinatorJob = coord;
-            }
-        }
-        Assert.assertNotNull(coordinatorJob);
-        return coordinatorJob.getUser();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-}
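
For reference, the removed authorization tests above all exercise one pattern: the first user submits or schedules an entity, the second user attempts a lifecycle operation on it, and the call is expected to fail with HTTP 400 while the entity state stays unchanged. A condensed sketch of that pattern, reusing the helpers visible in the diff (prism, bundles, AssertUtil, MerlinConstants); it is an illustration only, not part of the removed file:

    // U1 (the current user) submits a cluster; U2 must not be able to delete it.
    @Test(enabled = false) // FALCON-388: authorization is not enforced yet
    public void u1SubmitU2DeleteClusterSketch() throws Exception {
        bundles[0].submitClusters(prism);                                   // submit as U1
        ServiceResponse response = prism.getClusterHelper()
            .delete(bundles[0].getClusters().get(0), MerlinConstants.USER2_NAME); // delete as U2
        AssertUtil.assertFailedWithStatus(response, HttpStatus.SC_BAD_REQUEST,
            "Entity submitted by first user should not be deletable by second user");
    }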

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/CombinedActionsTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/CombinedActionsTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/CombinedActionsTest.java
deleted file mode 100644
index 7dd3d96..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/CombinedActionsTest.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.HCatUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.HCatCreateTableDesc;
-import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.joda.time.format.DateTimeFormat;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Test where a single workflow contains multiple actions.
- */
-@Test(groups = "embedded")
-public class CombinedActionsTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private HCatClient clusterHC;
-
-    private final String hiveTestDir = "/HiveData";
-    private final String baseTestHDFSDir = cleanAndGetTestDir() + hiveTestDir;
-    private final String inputHDFSDir = baseTestHDFSDir + "/input";
-    private final String outputHDFSDir = baseTestHDFSDir + "/output";
-    private String aggregateWorkflowDir = cleanAndGetTestDir() + "/aggregator";
-    private static final Logger LOGGER = Logger.getLogger(CombinedActionsTest.class);
-    private static final String HCATDIR = OSUtil.concat("src", "test", "resources", "hcat");
-    private static final String LOCALHCATDATA = OSUtil.concat(HCATDIR, "data");
-    public static final String DBNAME = "default";
-    public static final String COL1NAME = "id";
-    public static final String COL2NAME = "value";
-    public static final String PARTITIONCOLUMN = "dt";
-    private final String inputTableName = "combinedactionstest_input_table";
-    private final String outputTableName = "combinedactionstest_output_table";
-
-    private String pigMrTestDir = cleanAndGetTestDir() + "/pigMrData";
-    private String inputPath = pigMrTestDir + "/input/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}";
-    private String outputPathPig = pigMrTestDir + "/output/pig/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}";
-    private String outputPathMr = pigMrTestDir + "/output/mr/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}";
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.OOZIE_COMBINED_ACTIONS);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp(Method method) throws Exception {
-        LOGGER.info("test name: " + method.getName());
-        clusterHC = cluster.getClusterHelper().getHCatClient();
-        bundles[0] = BundleUtil.readCombinedActionsBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        AssertUtil.assertSucceeded(prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0)));
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws Exception {
-        removeTestClassEntities();
-        clusterHC.dropTable(DBNAME, inputTableName, true);
-        clusterHC.dropTable(DBNAME, outputTableName, true);
-        HadoopUtil.deleteDirIfExists(pigMrTestDir, clusterFS);
-    }
-
-    /**
-     * Schedule a process whose oozie workflow contains multiple actions (hive, mr, pig).
-     * The process should succeed. Fails right now due to: https://issues.apache.org/jira/browse/FALCON-670
-     *
-     * @throws Exception
-     */
-
-    @Test
-    public void combinedMrPigHiveAction() throws Exception {
-
-        //create data for pig, mr and hcat jobs
-        final String startDate = "2010-01-01T20:00Z";
-        final String endDate = "2010-01-02T04:00Z";
-
-        String inputFeedMrPig = bundles[0].getFeed("sampleFeed1");
-        FeedMerlin feedObj = new FeedMerlin(inputFeedMrPig);
-
-        HadoopUtil.deleteDirIfExists(pigMrTestDir + "/input", clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 20);
-
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.concat(OSUtil.NORMAL_INPUT, pigMrTestDir, "input"),
-            dataDates);
-
-        final String datePattern = StringUtils.join(new String[] { "yyyy", "MM", "dd", "HH", "mm"}, "-");
-        dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 60, DateTimeFormat.forPattern(datePattern));
-
-        final List<String> dataset = HadoopUtil.flattenAndPutDataInFolder(clusterFS, LOCALHCATDATA,
-                inputHDFSDir, dataDates);
-
-        ArrayList<HCatFieldSchema> cols = new ArrayList<>();
-        cols.add(HCatUtil.getStringSchema(COL1NAME, COL1NAME + " comment"));
-        cols.add(HCatUtil.getStringSchema(COL2NAME, COL2NAME + " comment"));
-        ArrayList<HCatFieldSchema> partitionCols = new ArrayList<>();
-
-        partitionCols.add(HCatUtil.getStringSchema(PARTITIONCOLUMN, PARTITIONCOLUMN + " partition"));
-        clusterHC.createTable(HCatCreateTableDesc
-                .create(DBNAME, inputTableName, cols)
-                .partCols(partitionCols)
-                .ifNotExists(true)
-                .isTableExternal(true)
-                .location(inputHDFSDir)
-                .build());
-
-        clusterHC.createTable(HCatCreateTableDesc
-                .create(DBNAME, outputTableName, cols)
-                .partCols(partitionCols)
-                .ifNotExists(true)
-                .isTableExternal(true)
-                .location(outputHDFSDir)
-                .build());
-
-        HCatUtil.addPartitionsToTable(clusterHC, dataDates, dataset, "dt", DBNAME, inputTableName);
-
-        final String tableUriPartitionFragment = StringUtils.join(
-            new String[]{"#dt=${YEAR}", "${MONTH}", "${DAY}", "${HOUR}", "${MINUTE}"}, "-");
-        String inputTableUri =
-            "catalog:" + DBNAME + ":" + inputTableName + tableUriPartitionFragment;
-        String outputTableUri =
-            "catalog:" + DBNAME + ":" + outputTableName + tableUriPartitionFragment;
-
-        //Set input and output feeds for bundle
-        //input feed for both mr and pig jobs
-        feedObj.setLocation(LocationType.DATA, inputPath);
-        LOGGER.info(feedObj.toString());
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feedObj.toString()));
-
-        //output feed for pig jobs
-        String outputFeedPig = bundles[0].getFeed("sampleFeed2");
-        feedObj = new FeedMerlin(outputFeedPig);
-        feedObj.setLocation(LocationType.DATA, outputPathPig);
-        feedObj.setFrequency(new Frequency("5", Frequency.TimeUnit.minutes));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feedObj.toString()));
-
-        //output feed for mr jobs
-        String outputFeedMr = bundles[0].getFeed("sampleFeed3");
-        feedObj = new FeedMerlin(outputFeedMr);
-        feedObj.setLocation(LocationType.DATA, outputPathMr);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feedObj.toString()));
-
-        //input feed for hcat jobs
-        String inputHive = bundles[0].getFeed("sampleFeedHCat1");
-        feedObj = new FeedMerlin(inputHive);
-        feedObj.getTable().setUri(inputTableUri);
-        feedObj.setFrequency(new Frequency("1", Frequency.TimeUnit.hours));
-        feedObj.getClusters().getClusters().get(0).getValidity()
-            .setStart(TimeUtil.oozieDateToDate(startDate).toDate());
-        feedObj.getClusters().getClusters().get(0).getValidity()
-            .setEnd(TimeUtil.oozieDateToDate(endDate).toDate());
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feedObj.toString()));
-
-        //output feed for hcat jobs
-        String outputHive = bundles[0].getFeed("sampleFeedHCat2");
-        feedObj = new FeedMerlin(outputHive);
-        feedObj.getTable().setUri(outputTableUri);
-        feedObj.setFrequency(new Frequency("1", Frequency.TimeUnit.hours));
-        feedObj.getClusters().getClusters().get(0).getValidity()
-            .setStart(TimeUtil.oozieDateToDate(startDate).toDate());
-        feedObj.getClusters().getClusters().get(0).getValidity()
-            .setEnd(TimeUtil.oozieDateToDate(endDate).toDate());
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feedObj.toString()));
-
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessValidity(startDate, endDate);
-        bundles[0].setProcessPeriodicity(1, Frequency.TimeUnit.hours);
-        bundles[0].setProcessInputStartEnd("now(0,0)", "now(0,0)");
-        AssertUtil.assertSucceeded(prism.getProcessHelper().submitAndSchedule(bundles[0].getProcessData()));
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(),
-            1, CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-    }
-}
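
As a side note, the HCatalog feeds in the removed CombinedActionsTest address partitions through a catalog URI assembled from the database name, table name and partition-key pattern shown above. A small illustration of the resulting string, using the constants from the test (DBNAME and inputTableName):

    // Shape of the URI produced by the concatenation in combinedMrPigHiveAction().
    String tableUriPartitionFragment = "#dt=${YEAR}-${MONTH}-${DAY}-${HOUR}-${MINUTE}";
    String inputTableUri = "catalog:default:combinedactionstest_input_table" + tableUriPartitionFragment;
    // inputTableUri -> catalog:default:combinedactionstest_input_table#dt=${YEAR}-${MONTH}-${DAY}-${HOUR}-${MINUTE}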

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpCurrentAndLastWeekTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpCurrentAndLastWeekTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpCurrentAndLastWeekTest.java
deleted file mode 100644
index 3eb7bed..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpCurrentAndLastWeekTest.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-import org.testng.annotations.DataProvider;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Tests for the currentWeek and lastWeek EL expressions.
- */
-
-@Test(groups = "embedded")
-public class ELExpCurrentAndLastWeekTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private static final Logger LOGGER = Logger.getLogger(ELExpCurrentAndLastWeekTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-
-        Bundle bundle = BundleUtil.readELBundle();
-        bundle.generateUniqueBundle(this);
-        bundle = new Bundle(bundle, cluster);
-
-        bundle.setInputFeedDataPath(baseTestDir + "/testData" + MINUTE_DATE_PATTERN);
-        bundle.setProcessWorkflow(aggregateWorkflowDir);
-
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        String processStart = "2015-02-17T10:30Z";
-        String processEnd = "2015-02-17T10:50Z";
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(baseTestDir + "/testData" + MINUTE_DATE_PATTERN);
-        bundles[0].setOutputFeedLocationData(baseTestDir + "/output" + MINUTE_DATE_PATTERN);
-        bundles[0].setInputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setInputFeedValidity("2010-04-01T00:00Z", "2099-04-01T00:00Z");
-        LOGGER.info("processStart: " + processStart + " processEnd: " + processEnd);
-        bundles[0].setProcessValidity(processStart, processEnd);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /* Test cases to check the currentWeek and lastWeek EL expressions.
-     * Find the missing dependencies of the coordinator based on the
-     * given EL definition in the entity and create them.
-     * These should match the expected missing dependencies.
-     * In case they don't match, the test should fail.
-     */
-    @Test(groups = {"singleCluster"}, dataProvider = "EL-DP-Cases")
-    public void currentAndLastWeekTest(String startInstance, String endInstance,
-            String firstDep, String endDep) throws Exception {
-        bundles[0].setDatasetInstances(startInstance, endInstance);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, bundles[0], Job.Status.RUNNING);
-
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        List<String> missingDependencies = getMissingDependencies(clusterOC, bundles[0].getProcessName());
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, bundles[0].getProcessName(), 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-        List<String> qaDependencies = getQADependencyList(bundles[0], firstDep, endDep);
-        Assert.assertTrue(matchDependencies(missingDependencies, qaDependencies));
-    }
-
-    @DataProvider(name = "EL-DP-Cases")
-    public Object[][] getELData() {
-        return new Object[][]{
-            {"currentWeek('WED',2,15)", "currentWeek('WED',2,25)", "2015-02-11T02:15Z", "2015-02-11T02:25Z"},
-            {"currentWeek('WED',21,60)", "currentWeek('WED',22,10)", "2015-02-11T22:00Z", "2015-02-11T22:10Z"},
-            {"currentWeek('WED',24,60)", "currentWeek('THU',01,10)", "2015-02-12T01:00Z", "2015-02-12T01:10Z"},
-            {"currentWeek('WED',04,-60)", "currentWeek('WED',04,10)", "2015-02-11T03:00Z", "2015-02-11T04:10Z"},
-            {"currentWeek('SAT',-04,-60)", "currentWeek('SAT',-04,-40)", "2015-02-13T19:00Z", "2015-02-13T19:20Z"},
-            {"currentWeek('SAT',-24,-60)", "currentWeek('SAT',-24,-40)", "2015-02-12T23:00Z", "2015-02-12T23:20Z"},
-            {"lastWeek('THU',-24,-20)", "lastWeek('THU',-24,-05)", "2015-02-03T23:40Z", "2015-02-03T23:55Z"},
-            {"lastWeek('WED',2,15)", "lastWeek('WED',2,25)", "2015-02-04T02:15Z", "2015-02-04T02:25Z"},
-            {"lastWeek('WED',21,60)", "lastWeek('WED',22,10)", "2015-02-04T22:00Z", "2015-02-04T22:10Z"},
-            {"lastWeek('WED',24,60)", "lastWeek('THU',01,10)", "2015-02-05T01:00Z", "2015-02-05T01:10Z"},
-            {"lastWeek('WED',04,-60)", "lastWeek('WED',04,10)", "2015-02-04T03:00Z", "2015-02-04T04:10Z"},
-            {"lastWeek('FRI',01,05)", "lastWeek('FRI',01,20)", "2015-02-06T01:05Z", "2015-02-06T01:20Z"},
-            {"lastWeek('FRI',01,60)", "lastWeek('FRI',02,20)", "2015-02-06T02:00Z", "2015-02-06T02:20Z"},
-            {"lastWeek('FRI',24,00)", "lastWeek('SAT',00,20)", "2015-02-07T00:00Z", "2015-02-07T00:20Z"},
-            {"lastWeek('THU',-04,-20)", "lastWeek('THU',-04,-05)", "2015-02-04T19:40Z", "2015-02-04T19:55Z"},
-        };
-    }
-
-    private boolean matchDependencies(List<String> fromJob, List<String> qaList) {
-        Collections.sort(fromJob);
-        Collections.sort(qaList);
-        if (fromJob.size() != qaList.size()) {
-            return false;
-        }
-        for (int index = 0; index < fromJob.size(); index++) {
-            if (!fromJob.get(index).contains(qaList.get(index))) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    public List<String> getMissingDependencies(OozieClient oozieClient,
-                                              String processName) throws OozieClientException {
-        List<String> bundles = OozieUtil.getBundles(oozieClient, processName, EntityType.PROCESS);
-        String coordID = bundles.get(0);
-        List<String> missingDependencies = OozieUtil.getMissingDependencies(oozieClient, coordID);
-        for (int i = 0; i < 10 && missingDependencies == null; ++i) {
-            TimeUtil.sleepSeconds(30);
-            missingDependencies = OozieUtil.getMissingDependencies(oozieClient, coordID);
-        }
-        Assert.assertNotNull(missingDependencies, "Missing dependencies not found.");
-        return missingDependencies;
-    }
-
-    private List<String> getQADependencyList(Bundle bundle, String firstDep, String endDep) {
-        String path = baseTestDir + "/testData/";
-        List<String> returnList = new ArrayList<>();
-        List<String> dataSets = TimeUtil.getMinuteDatesOnEitherSide(firstDep,
-                endDep, bundle.getInitialDatasetFrequency());
-        for (String str : dataSets) {
-            returnList.add(path + str);
-        }
-        return returnList;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpFutureAndLatestTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpFutureAndLatestTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpFutureAndLatestTest.java
deleted file mode 100644
index 90826f1..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELExpFutureAndLatestTest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * Tests for the future and latest EL expressions.
- */
-@Test(groups = "embedded")
-public class ELExpFutureAndLatestTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private static final Logger LOGGER = Logger.getLogger(ELExpFutureAndLatestTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-
-        Bundle b = BundleUtil.readELBundle();
-        b.generateUniqueBundle(this);
-        b = new Bundle(b, cluster);
-
-        String startDate = TimeUtil.getTimeWrtSystemTime(-20);
-        String endDate = TimeUtil.getTimeWrtSystemTime(70);
-
-        b.setInputFeedDataPath(baseTestDir + "/testData" + MINUTE_DATE_PATTERN);
-        b.setProcessWorkflow(aggregateWorkflowDir);
-
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 1);
-
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT,
-            b.getFeedDataPathPrefix(), dataDates);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(baseTestDir + "/testData" + MINUTE_DATE_PATTERN);
-        bundles[0].setOutputFeedLocationData(baseTestDir + "/output" + MINUTE_DATE_PATTERN);
-        bundles[0].setInputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setInputFeedValidity("2010-04-01T00:00Z", "2099-04-01T00:00Z");
-        String processStart = TimeUtil.getTimeWrtSystemTime(-3);
-        String processEnd = TimeUtil.getTimeWrtSystemTime(8);
-        LOGGER.info("processStart: " + processStart + " processEnd: " + processEnd);
-        bundles[0].setProcessValidity(processStart, processEnd);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @Test(groups = {"singleCluster"})
-    public void latestTest() throws Exception {
-        bundles[0].setDatasetInstances("latest(-3)", "latest(0)");
-        AssertUtil.assertSucceeded(bundles[0].submitFeedsScheduleProcess(prism));
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-    }
-
-    @Test(groups = {"singleCluster"})
-    public void futureTest() throws Exception {
-        bundles[0].setDatasetInstances("future(0,10)", "future(3,10)");
-        AssertUtil.assertSucceeded(bundles[0].submitFeedsScheduleProcess(prism));
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-    }
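-    /* Note (added for clarity; semantics paraphrased from Falcon's EL expression
-     * documentation rather than asserted by these tests): latest(-3)..latest(0)
-     * above is understood to cover the four most recently available input
-     * instances, and future(0,10)..future(3,10) the first four instances after
-     * the nominal time, with 10 as the limit on how far ahead to search.
-     */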
-}


[28/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java b/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
deleted file mode 100644
index 29f933d..0000000
--- a/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
+++ /dev/null
@@ -1,1228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-import com.tinkerpop.blueprints.Direction;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.GraphQuery;
-import com.tinkerpop.blueprints.Vertex;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EntityBuilderTestUtil;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.retention.EvictedInstanceSerDe;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.service.Services;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.falcon.workflow.WorkflowExecutionArgs;
-import org.apache.falcon.workflow.WorkflowExecutionContext;
-import org.apache.falcon.workflow.WorkflowJobEndNotificationService;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.falcon.workflow.WorkflowExecutionContext.EntityOperations;
-
-/**
- * Test for Metadata relationship mapping service.
- */
-public class MetadataMappingServiceTest {
-
-    public static final String FALCON_USER = "falcon-user";
-    private static final String LOGS_DIR = "jail://global:00/falcon/staging/feed/logs";
-    private static final String NOMINAL_TIME = "2014-01-01-01-00";
-
-    public static final String CLUSTER_ENTITY_NAME = "primary-cluster";
-    public static final String BCP_CLUSTER_ENTITY_NAME = "bcp-cluster";
-    public static final String PROCESS_ENTITY_NAME = "sample-process";
-    public static final String COLO_NAME = "west-coast";
-    public static final String GENERATE_WORKFLOW_NAME = "imp-click-join-workflow";
-    public static final String REPLICATION_WORKFLOW_NAME = "replication-policy-workflow";
-    private static final String EVICTION_WORKFLOW_NAME = "eviction-policy-workflow";
-    public static final String WORKFLOW_VERSION = "1.0.9";
-
-    public static final String INPUT_FEED_NAMES = "impression-feed#clicks-feed";
-    public static final String INPUT_INSTANCE_PATHS =
-        "jail://global:00/falcon/impression-feed/2014/01/01,jail://global:00/falcon/impression-feed/2014/01/02"
-                + "#jail://global:00/falcon/clicks-feed/2014-01-01";
-    public static final String INPUT_INSTANCE_PATHS_NO_DATE =
-            "jail://global:00/falcon/impression-feed,jail://global:00/falcon/impression-feed"
-                    + "#jail://global:00/falcon/clicks-feed";
-
-    public static final String OUTPUT_FEED_NAMES = "imp-click-join1,imp-click-join2";
-    public static final String OUTPUT_INSTANCE_PATHS =
-        "jail://global:00/falcon/imp-click-join1/20140101,jail://global:00/falcon/imp-click-join2/20140101";
-    private static final String REPLICATED_FEED = "raw-click";
-    private static final String EVICTED_FEED = "imp-click-join1";
-    private static final String EVICTED_INSTANCE_PATHS =
-            "jail://global:00/falcon/imp-click-join1/20140101,jail://global:00/falcon/imp-click-join1/20140102";
-    public static final String OUTPUT_INSTANCE_PATHS_NO_DATE =
-            "jail://global:00/falcon/imp-click-join1,jail://global:00/falcon/imp-click-join2";
-    public static final String COUNTERS = "TIMETAKEN:36956,COPY:30,BYTESCOPIED:1000";
-
-    public static final String BROKER = "org.apache.activemq.ActiveMQConnectionFactory";
-
-    private ConfigurationStore configStore;
-    private MetadataMappingService service;
-
-    private Cluster clusterEntity;
-    private Cluster anotherCluster;
-    private List<Feed> inputFeeds = new ArrayList<>();
-    private List<Feed> outputFeeds = new ArrayList<>();
-    private Process processEntity;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        CurrentUser.authenticate(FALCON_USER);
-
-        configStore = ConfigurationStore.get();
-
-        Services.get().register(new WorkflowJobEndNotificationService());
-        StartupProperties.get().setProperty("falcon.graph.storage.directory",
-                "target/graphdb-" + System.currentTimeMillis());
-        StartupProperties.get().setProperty("falcon.graph.preserve.history", "true");
-        service = new MetadataMappingService();
-        service.init();
-
-        Set<String> vertexPropertyKeys = service.getVertexIndexedKeys();
-        System.out.println("Got vertex property keys: " + vertexPropertyKeys);
-
-        Set<String> edgePropertyKeys = service.getEdgeIndexedKeys();
-        System.out.println("Got edge property keys: " + edgePropertyKeys);
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        GraphUtils.dump(service.getGraph(), System.out);
-
-        cleanUp();
-        StartupProperties.get().setProperty("falcon.graph.preserve.history", "false");
-    }
-
-    @AfterMethod
-    public void printGraph() throws Exception {
-        GraphUtils.dump(service.getGraph());
-    }
-
-    private GraphQuery getQuery() {
-        return service.getGraph().query();
-    }
-
-    @Test
-    public void testGetName() throws Exception {
-        Assert.assertEquals(service.getName(), MetadataMappingService.SERVICE_NAME);
-    }
-
-    @Test
-    public void testOnAddClusterEntity() throws Exception {
-        // Get the before vertices and edges
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-        clusterEntity = addClusterEntity(CLUSTER_ENTITY_NAME, COLO_NAME,
-                "classification=production");
-
-        verifyEntityWasAddedToGraph(CLUSTER_ENTITY_NAME, RelationshipType.CLUSTER_ENTITY);
-        verifyClusterEntityEdges();
-
-        // +4 = cluster, colo, tag, user
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 4);
-        // +3 = cluster to colo, user and tag
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 3);
-    }
-
-    @Test (dependsOnMethods = "testOnAddClusterEntity")
-    public void testOnAddFeedEntity() throws Exception {
-        // Get the before vertices and edges
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        Feed impressionsFeed = addFeedEntity("impression-feed", clusterEntity,
-                "classified-as=Secure", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/impression-feed/${YEAR}/${MONTH}/${DAY}");
-        inputFeeds.add(impressionsFeed);
-        verifyEntityWasAddedToGraph(impressionsFeed.getName(), RelationshipType.FEED_ENTITY);
-        verifyFeedEntityEdges(impressionsFeed.getName(), "Secure", "analytics");
-        // +3 = feed, tag, group (the user vertex already exists)
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 3);
-        // +4 = cluster, tag, group, user
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 4);
-
-        // Get the before vertices and edges
-        beforeVerticesCount = getVerticesCount(service.getGraph());
-        beforeEdgesCount = getEdgesCount(service.getGraph());
-        Feed clicksFeed = addFeedEntity("clicks-feed", clusterEntity,
-                "classified-as=Secure,classified-as=Financial", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/clicks-feed/${YEAR}-${MONTH}-${DAY}");
-        inputFeeds.add(clicksFeed);
-        verifyEntityWasAddedToGraph(clicksFeed.getName(), RelationshipType.FEED_ENTITY);
-        // +2 = feed and Financial tag vertex
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 2);
-        // +5 = cluster + user + group + 2 tags
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 5);
-
-        // Get the before vertices and edges
-        beforeVerticesCount = getVerticesCount(service.getGraph());
-        beforeEdgesCount = getEdgesCount(service.getGraph());
-        Feed join1Feed = addFeedEntity("imp-click-join1", clusterEntity,
-                "classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join1/${YEAR}${MONTH}${DAY}");
-        outputFeeds.add(join1Feed);
-        verifyEntityWasAddedToGraph(join1Feed.getName(), RelationshipType.FEED_ENTITY);
-        // +3 = 1 feed and 2 groups
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 3);
-        // +5 = cluster + user + 2 groups + tag
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 5);
-
-        // Get the before vertices and edges
-        beforeVerticesCount = getVerticesCount(service.getGraph());
-        beforeEdgesCount = getEdgesCount(service.getGraph());
-        Feed join2Feed = addFeedEntity("imp-click-join2", clusterEntity,
-                "classified-as=Secure,classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join2/${YEAR}${MONTH}${DAY}");
-        outputFeeds.add(join2Feed);
-        verifyEntityWasAddedToGraph(join2Feed.getName(), RelationshipType.FEED_ENTITY);
-
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 1); // +1 feed
-        // +6 = user + 2tags + 2Groups + Cluster
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 6);
-    }
-
-    @Test (dependsOnMethods = "testOnAddFeedEntity")
-    public void testOnAddProcessEntity() throws Exception {
-        // Get the before vertices and edges
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        processEntity = addProcessEntity(PROCESS_ENTITY_NAME, clusterEntity,
-                "classified-as=Critical", "testPipeline,dataReplication_Pipeline", GENERATE_WORKFLOW_NAME,
-                WORKFLOW_VERSION, inputFeeds, outputFeeds);
-
-        verifyEntityWasAddedToGraph(processEntity.getName(), RelationshipType.PROCESS_ENTITY);
-        verifyProcessEntityEdges();
-
-        // +4 = 1 process + 1 tag + 2 pipeline
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 4);
-        // +9 = user,tag,cluster, 2 inputs,2 outputs, 2 pipelines
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 9);
-    }
-
-    @Test (dependsOnMethods = "testOnAddProcessEntity")
-    public void testOnAdd() throws Exception {
-        verifyEntityGraph(RelationshipType.FEED_ENTITY, "Secure");
-    }
-
-    @Test
-    public void testMapLineage() throws Exception {
-        setup();
-
-        // Get the before vertices and edges
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                EntityOperations.GENERATE, GENERATE_WORKFLOW_NAME, null, null, null, null)
-                , WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-        verifyLineageGraph(RelationshipType.FEED_INSTANCE.getName());
-
-        // +6 = 1 process instance + 3 input instances (one feed has 2 paths) + 2 output instances
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 6);
-        // +40 = 26 for feed instances + 8 for the process instance + 6 for the second feed instance
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 40);
-    }
-
-    @Test
-    public void testLineageForNoDateInFeedPath() throws Exception {
-        setupForNoDateInFeedPath();
-
-        // Get the before vertices and edges
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.GENERATE, GENERATE_WORKFLOW_NAME, null,
-                        OUTPUT_INSTANCE_PATHS_NO_DATE, INPUT_INSTANCE_PATHS_NO_DATE, null),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-
-        // Verify if instance name has nominal time
-        List<String> feedNamesOwnedByUser = getFeedsOwnedByAUser(
-                RelationshipType.FEED_INSTANCE.getName());
-        List<String> expected = Arrays.asList("impression-feed/2014-01-01T01:00Z", "clicks-feed/2014-01-01T01:00Z",
-                "imp-click-join1/2014-01-01T01:00Z", "imp-click-join2/2014-01-01T01:00Z");
-        Assert.assertTrue(feedNamesOwnedByUser.containsAll(expected));
-
-        // +5 = 1 process instance, 2 input instances, 2 output instances
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 5);
-        // +34 = 26 for feed instances + 8 for the process instance
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 34);
-    }
-
-    @Test
-    public void testLineageForReplication() throws Exception {
-        setupForLineageReplication();
-
-        // Get the before vertices and edges
-        // +7 [primary, bcp cluster] = cluster, colo, tag, user
-        // +3 [input feed] = feed, tag, group
-        // +4 [output feed] = 1 feed + 1 tag + 2 groups
-        // +4 [process] = 1 process + 1 tag + 2 pipeline
-        // +3 = 1 process, 1 input, 1 output
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-
-        // +4 [cluster] = cluster to colo and tag [primary and bcp],
-        // +4 [input feed] = cluster, tag, group, user
-        // +5 [output feed] = cluster + user + Group + 2Tags
-        // +7 = user,tag,cluster, 1 input,1 output, 2 pipelines
-        // +19 = +6 for output feed instances + 7 for process instance + 6 for input feed instance
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.REPLICATE, REPLICATION_WORKFLOW_NAME, REPLICATED_FEED,
-                        "jail://global:00/falcon/raw-click/bcp/20140101",
-                        "jail://global:00/falcon/raw-click/primary/20140101", REPLICATED_FEED),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-
-        verifyLineageGraphForReplicationOrEviction(REPLICATED_FEED,
-                "jail://global:00/falcon/raw-click/bcp/20140101", context,
-                RelationshipLabel.FEED_CLUSTER_REPLICATED_EDGE);
-
-        // No new vertex added after replication
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 0);
-
-        // +1 for replicated-to edge to target cluster for each output feed instance
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 1);
-    }
-
-    @Test
-    public void testLineageForReplicationForNonGeneratedInstances() throws Exception {
-        cleanUp();
-        service.init();
-        addClusterAndFeedForReplication(inputFeeds);
-        // Get the vertices before running replication WF
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.REPLICATE, REPLICATION_WORKFLOW_NAME, REPLICATED_FEED,
-                        "jail://global:00/falcon/raw-click/bcp/20140101",
-                        "jail://global:00/falcon/raw-click/primary/20140101", REPLICATED_FEED),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-
-        verifyFeedEntityEdges(REPLICATED_FEED, "Secure", "analytics");
-        verifyLineageGraphForReplicationOrEviction(REPLICATED_FEED,
-                "jail://global:00/falcon/raw-click/bcp/20140101", context,
-                RelationshipLabel.FEED_CLUSTER_REPLICATED_EDGE);
-
-        // +1 for the new instance vertex added
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 1);
-        // +6 = instance-of, stored-in, owned-by, classification, group, replicated-to
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 6);
-    }
-
-    @Test
-    public void testLineageForRetention() throws Exception {
-        setupForLineageEviction();
-        // Get the before vertices and edges
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.DELETE, EVICTION_WORKFLOW_NAME,
-                        EVICTED_FEED, EVICTED_INSTANCE_PATHS, "IGNORE", EVICTED_FEED),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-
-        service.onSuccess(context);
-
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-        List<String> expectedFeeds = Arrays.asList("impression-feed/2014-01-01T00:00Z", "clicks-feed/2014-01-01T00:00Z",
-                "imp-click-join1/2014-01-01T00:00Z", "imp-click-join1/2014-01-02T00:00Z");
-        List<String> secureFeeds = Arrays.asList("impression-feed/2014-01-01T00:00Z",
-                "clicks-feed/2014-01-01T00:00Z");
-        List<String> ownedAndSecureFeeds = Arrays.asList("clicks-feed/2014-01-01T00:00Z",
-                "imp-click-join1/2014-01-01T00:00Z", "imp-click-join1/2014-01-02T00:00Z");
-        verifyLineageGraph(RelationshipType.FEED_INSTANCE.getName(), expectedFeeds, secureFeeds, ownedAndSecureFeeds);
-        String[] paths = EVICTED_INSTANCE_PATHS.split(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR);
-        for (String feedInstanceDataPath : paths) {
-            verifyLineageGraphForReplicationOrEviction(EVICTED_FEED, feedInstanceDataPath, context,
-                    RelationshipLabel.FEED_CLUSTER_EVICTED_EDGE);
-        }
-
-        // No new vertices added
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 0);
-        // +2 for evicted-from edge from Feed Instance vertex to cluster
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 2);
-    }
-
-    @Test
-    public void testLineageForRetentionWithNoFeedsEvicted() throws Exception {
-        cleanUp();
-        service.init();
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.DELETE, EVICTION_WORKFLOW_NAME,
-                        EVICTED_FEED, "IGNORE", "IGNORE", EVICTED_FEED),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-
-        service.onSuccess(context);
-
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-        // No new vertices added
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount);
-        // No new edges added
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount);
-    }
-
-    @Test (dependsOnMethods = "testOnAdd")
-    public void testOnChange() throws Exception {
-        // shutdown the graph and resurrect for testing
-        service.destroy();
-        service.init();
-
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        // cannot modify cluster, adding a new cluster
-        anotherCluster = addClusterEntity("another-cluster", "east-coast",
-                "classification=another");
-        verifyEntityWasAddedToGraph("another-cluster", RelationshipType.CLUSTER_ENTITY);
-
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 3); // +3 = cluster, colo, tag
-        // +3 edges to user, colo and new tag
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 3);
-    }
-
-    @Test(dependsOnMethods = "testOnChange")
-    public void testOnFeedEntityChange() throws Exception {
-        Feed oldFeed = inputFeeds.get(0);
-        Feed newFeed = EntityBuilderTestUtil.buildFeed(oldFeed.getName(), clusterEntity,
-                "classified-as=Secured,source=data-warehouse", "reporting");
-        addStorage(newFeed, Storage.TYPE.FILESYSTEM,
-                "jail://global:00/falcon/impression-feed/20140101");
-
-        long beforeVerticesCount = 0;
-        long beforeEdgesCount = 0;
-
-        try {
-            configStore.initiateUpdate(newFeed);
-
-            beforeVerticesCount = getVerticesCount(service.getGraph());
-            beforeEdgesCount = getEdgesCount(service.getGraph());
-
-            // add cluster
-            org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                    new org.apache.falcon.entity.v0.feed.Cluster();
-            feedCluster.setName(anotherCluster.getName());
-            newFeed.getClusters().getClusters().add(feedCluster);
-
-            configStore.update(EntityType.FEED, newFeed);
-        } finally {
-            configStore.cleanupUpdateInit();
-        }
-
-        verifyUpdatedEdges(newFeed);
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 2); //+2 = 2 new tags
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount + 2); // +2 = 1 new cluster, 1 new tag
-    }
-
-    @Test
-    public void testLineageForTransactionFailure() throws Exception {
-        cleanUp();
-        service.init();
-        clusterEntity = addClusterEntity(CLUSTER_ENTITY_NAME, COLO_NAME,
-                "classification=production");
-        verifyEntityWasAddedToGraph(CLUSTER_ENTITY_NAME, RelationshipType.CLUSTER_ENTITY);
-        verifyClusterEntityEdges();
-        Assert.assertEquals(getVerticesCount(service.getGraph()), 4); // 4 = cluster, colo, user, tag
-        Assert.assertEquals(getEdgesCount(service.getGraph()), 3); // 3 = cluster to colo, user and tag
-
-        Feed feed = EntityBuilderTestUtil.buildFeed("feed-name", new Cluster[]{clusterEntity}, null, null);
-        inputFeeds.add(feed);
-        outputFeeds.add(feed);
-
-        try {
-            processEntity = addProcessEntity(PROCESS_ENTITY_NAME, clusterEntity,
-                    "classified-as=Critical", "testPipeline,dataReplication_Pipeline", GENERATE_WORKFLOW_NAME,
-                    WORKFLOW_VERSION, inputFeeds, outputFeeds);
-            Assert.fail();
-        } catch (FalconException e) {
-            Assert.assertEquals(getVerticesCount(service.getGraph()), 4);
-            Assert.assertEquals(getEdgesCount(service.getGraph()), 3);
-        }
-    }
-
-    private void verifyUpdatedEdges(Feed newFeed) {
-        Vertex feedVertex = getEntityVertex(newFeed.getName(), RelationshipType.FEED_ENTITY);
-
-        // groups
-        Edge edge = feedVertex.getEdges(Direction.OUT, RelationshipLabel.GROUPS.getName()).iterator().next();
-        Assert.assertEquals(edge.getVertex(Direction.IN).getProperty("name"), "reporting");
-
-        // tags
-        edge = feedVertex.getEdges(Direction.OUT, "classified-as").iterator().next();
-        Assert.assertEquals(edge.getVertex(Direction.IN).getProperty("name"), "Secured");
-        edge = feedVertex.getEdges(Direction.OUT, "source").iterator().next();
-        Assert.assertEquals(edge.getVertex(Direction.IN).getProperty("name"), "data-warehouse");
-
-        // new cluster
-        List<String> actual = new ArrayList<>();
-        for (Edge clusterEdge : feedVertex.getEdges(Direction.OUT, RelationshipLabel.FEED_CLUSTER_EDGE.getName())) {
-            actual.add(clusterEdge.getVertex(Direction.IN).<String>getProperty("name"));
-        }
-        Assert.assertTrue(actual.containsAll(Arrays.asList("primary-cluster", "another-cluster")),
-                "Actual does not contain expected: " + actual);
-    }
-
-    @Test(dependsOnMethods = "testOnFeedEntityChange")
-    public void testOnProcessEntityChange() throws Exception {
-        long beforeVerticesCount = getVerticesCount(service.getGraph());
-        long beforeEdgesCount = getEdgesCount(service.getGraph());
-
-        Process oldProcess = processEntity;
-        Process newProcess = EntityBuilderTestUtil.buildProcess(oldProcess.getName(), anotherCluster,
-                null, null);
-        EntityBuilderTestUtil.addProcessWorkflow(newProcess, GENERATE_WORKFLOW_NAME, "2.0.0");
-        EntityBuilderTestUtil.addInput(newProcess, inputFeeds.get(0));
-
-        try {
-            configStore.initiateUpdate(newProcess);
-            configStore.update(EntityType.PROCESS, newProcess);
-        } finally {
-            configStore.cleanupUpdateInit();
-        }
-
-        verifyUpdatedEdges(newProcess);
-        Assert.assertEquals(getVerticesCount(service.getGraph()), beforeVerticesCount + 0); // +0, no net new
-        Assert.assertEquals(getEdgesCount(service.getGraph()), beforeEdgesCount - 6); // -6 = -2 outputs, -1 tag,
-        // -1 cluster, -2 pipelines
-    }
-
-    @Test(dependsOnMethods = "testOnProcessEntityChange")
-    public void testAreSame() throws Exception {
-
-        Inputs inputs1 = new Inputs();
-        Inputs inputs2 = new Inputs();
-        Outputs outputs1 = new Outputs();
-        Outputs outputs2 = new Outputs();
-        // return true when both are null
-        Assert.assertTrue(EntityRelationshipGraphBuilder.areSame(inputs1, inputs2));
-        Assert.assertTrue(EntityRelationshipGraphBuilder.areSame(outputs1, outputs2));
-
-        Input i1 = new Input();
-        i1.setName("input1");
-        Input i2 = new Input();
-        i2.setName("input2");
-        Output o1 = new Output();
-        o1.setName("output1");
-        Output o2 = new Output();
-        o2.setName("output2");
-
-        inputs1.getInputs().add(i1);
-        Assert.assertFalse(EntityRelationshipGraphBuilder.areSame(inputs1, inputs2));
-        outputs1.getOutputs().add(o1);
-        Assert.assertFalse(EntityRelationshipGraphBuilder.areSame(outputs1, outputs2));
-
-        inputs2.getInputs().add(i1);
-        Assert.assertTrue(EntityRelationshipGraphBuilder.areSame(inputs1, inputs2));
-        outputs2.getOutputs().add(o1);
-        Assert.assertTrue(EntityRelationshipGraphBuilder.areSame(outputs1, outputs2));
-    }
-
-    @Test
-    public void testLineageForJobCounter() throws Exception {
-        setupForJobCounters();
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.GENERATE, GENERATE_WORKFLOW_NAME, "IGNORE", "IGNORE", "IGNORE", "NONE"),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-        debug(service.getGraph());
-        GraphUtils.dump(service.getGraph());
-        Graph graph = service.getGraph();
-
-        Vertex vertex = graph.getVertices("name", "sample-process/2014-01-01T01:00Z").iterator().next();
-        Assert.assertEquals(vertex.getProperty("TIMETAKEN"), 36956L);
-        Assert.assertEquals(vertex.getProperty("COPY"), 30L);
-        Assert.assertEquals(vertex.getProperty("BYTESCOPIED"), 1000L);
-        Assert.assertEquals(getVerticesCount(service.getGraph()), 9);
-        Assert.assertEquals(getEdgesCount(service.getGraph()), 14);
-        verifyLineageGraphForJobCounters(context);
-    }
-
-    private void verifyUpdatedEdges(Process newProcess) {
-        Vertex processVertex = getEntityVertex(newProcess.getName(), RelationshipType.PROCESS_ENTITY);
-
-        // cluster
-        Edge edge = processVertex.getEdges(Direction.OUT,
-                RelationshipLabel.PROCESS_CLUSTER_EDGE.getName()).iterator().next();
-        Assert.assertEquals(edge.getVertex(Direction.IN).getProperty("name"), anotherCluster.getName());
-
-        // inputs
-        edge = processVertex.getEdges(Direction.IN, RelationshipLabel.FEED_PROCESS_EDGE.getName()).iterator().next();
-        Assert.assertEquals(edge.getVertex(Direction.OUT).getProperty("name"),
-                newProcess.getInputs().getInputs().get(0).getFeed());
-
-        // outputs
-        for (Edge e : processVertex.getEdges(Direction.OUT, RelationshipLabel.PROCESS_FEED_EDGE.getName())) {
-            Assert.fail("there should not be any edges to output feeds: " + e);
-        }
-    }
-
-    public static void debug(final Graph graph) {
-        System.out.println("*****Vertices of " + graph);
-        for (Vertex vertex : graph.getVertices()) {
-            System.out.println(GraphUtils.vertexString(vertex));
-        }
-
-        System.out.println("*****Edges of " + graph);
-        for (Edge edge : graph.getEdges()) {
-            System.out.println(GraphUtils.edgeString(edge));
-        }
-    }
-
-    private Cluster addClusterEntity(String name, String colo, String tags) throws Exception {
-        Cluster cluster = EntityBuilderTestUtil.buildCluster(name, colo, tags);
-        configStore.publish(EntityType.CLUSTER, cluster);
-        return cluster;
-    }
-
-    private Feed addFeedEntity(String feedName, Cluster cluster, String tags, String groups,
-                               Storage.TYPE storageType, String uriTemplate) throws Exception {
-        return addFeedEntity(feedName, new Cluster[]{cluster}, tags, groups, storageType, uriTemplate);
-    }
-
-    private Feed addFeedEntity(String feedName, Cluster[] clusters, String tags, String groups,
-                               Storage.TYPE storageType, String uriTemplate) throws Exception {
-        Feed feed = EntityBuilderTestUtil.buildFeed(feedName, clusters,
-                tags, groups);
-        addStorage(feed, storageType, uriTemplate);
-        for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : feed.getClusters().getClusters()) {
-            if (feedCluster.getName().equals(BCP_CLUSTER_ENTITY_NAME)) {
-                feedCluster.setType(ClusterType.TARGET);
-            }
-        }
-        configStore.publish(EntityType.FEED, feed);
-        return feed;
-    }
-
-    //SUSPEND CHECKSTYLE CHECK ParameterNumberCheck
-    public Process addProcessEntity(String processName, Cluster cluster,
-                                    String tags, String pipelineTags, String workflowName,
-                                    String version, List<Feed> inFeeds, List<Feed> outFeeds) throws Exception {
-        Process process = EntityBuilderTestUtil.buildProcess(processName, cluster,
-                tags, pipelineTags);
-        EntityBuilderTestUtil.addProcessWorkflow(process, workflowName, version);
-
-        for (Feed inputFeed : inFeeds) {
-            EntityBuilderTestUtil.addInput(process, inputFeed);
-        }
-
-        for (Feed outputFeed : outFeeds) {
-            EntityBuilderTestUtil.addOutput(process, outputFeed);
-        }
-
-        configStore.publish(EntityType.PROCESS, process);
-        return process;
-    }
-    //RESUME CHECKSTYLE CHECK ParameterNumberCheck
-
-    private static void addStorage(Feed feed, Storage.TYPE storageType, String uriTemplate) {
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            Locations locations = new Locations();
-            feed.setLocations(locations);
-
-            Location location = new Location();
-            location.setType(LocationType.DATA);
-            location.setPath(uriTemplate);
-            feed.getLocations().getLocations().add(location);
-        } else {
-            CatalogTable table = new CatalogTable();
-            table.setUri(uriTemplate);
-            feed.setTable(table);
-        }
-    }
-
-    private static void addStorage(org.apache.falcon.entity.v0.feed.Cluster cluster, Feed feed,
-                                   Storage.TYPE storageType, String uriTemplate) {
-        if (storageType == Storage.TYPE.FILESYSTEM) {
-            Locations locations = new Locations();
-            feed.setLocations(locations);
-
-            Location location = new Location();
-            location.setType(LocationType.DATA);
-            location.setPath(uriTemplate);
-            cluster.setLocations(new Locations());
-            cluster.getLocations().getLocations().add(location);
-        } else {
-            CatalogTable table = new CatalogTable();
-            table.setUri(uriTemplate);
-            cluster.setTable(table);
-        }
-    }
-
-    private void verifyEntityWasAddedToGraph(String entityName, RelationshipType entityType) {
-        Vertex entityVertex = getEntityVertex(entityName, entityType);
-        Assert.assertNotNull(entityVertex);
-        verifyEntityProperties(entityVertex, entityName, entityType);
-    }
-
-    private void verifyEntityProperties(Vertex entityVertex, String entityName, RelationshipType entityType) {
-        Assert.assertEquals(entityName, entityVertex.getProperty(RelationshipProperty.NAME.getName()));
-        Assert.assertEquals(entityType.getName(), entityVertex.getProperty(RelationshipProperty.TYPE.getName()));
-        Assert.assertNotNull(entityVertex.getProperty(RelationshipProperty.TIMESTAMP.getName()));
-    }
-
-    private void verifyClusterEntityEdges() {
-        Vertex clusterVertex = getEntityVertex(CLUSTER_ENTITY_NAME,
-                RelationshipType.CLUSTER_ENTITY);
-
-        // verify edge to user vertex
-        verifyVertexForEdge(clusterVertex, Direction.OUT, RelationshipLabel.USER.getName(),
-                FALCON_USER, RelationshipType.USER.getName());
-        // verify edge to colo vertex
-        verifyVertexForEdge(clusterVertex, Direction.OUT, RelationshipLabel.CLUSTER_COLO.getName(),
-                COLO_NAME, RelationshipType.COLO.getName());
-        // verify edge to tags vertex
-        verifyVertexForEdge(clusterVertex, Direction.OUT, "classification",
-                "production", RelationshipType.TAGS.getName());
-    }
-
-    private void verifyFeedEntityEdges(String feedName, String tag, String group) {
-        Vertex feedVertex = getEntityVertex(feedName, RelationshipType.FEED_ENTITY);
-
-        // verify edge to cluster vertex
-        verifyVertexForEdge(feedVertex, Direction.OUT, RelationshipLabel.FEED_CLUSTER_EDGE.getName(),
-                CLUSTER_ENTITY_NAME, RelationshipType.CLUSTER_ENTITY.getName());
-        // verify edge to user vertex
-        verifyVertexForEdge(feedVertex, Direction.OUT, RelationshipLabel.USER.getName(),
-                FALCON_USER, RelationshipType.USER.getName());
-
-        // verify edge to tags vertex
-        verifyVertexForEdge(feedVertex, Direction.OUT, "classified-as",
-                tag, RelationshipType.TAGS.getName());
-        // verify edge to group vertex
-        verifyVertexForEdge(feedVertex, Direction.OUT, RelationshipLabel.GROUPS.getName(),
-                group, RelationshipType.GROUPS.getName());
-    }
-
-    private void verifyProcessEntityEdges() {
-        Vertex processVertex = getEntityVertex(PROCESS_ENTITY_NAME,
-                RelationshipType.PROCESS_ENTITY);
-
-        // verify edge to cluster vertex
-        verifyVertexForEdge(processVertex, Direction.OUT, RelationshipLabel.PROCESS_CLUSTER_EDGE.getName(),
-                CLUSTER_ENTITY_NAME, RelationshipType.CLUSTER_ENTITY.getName());
-        // verify edge to user vertex
-        verifyVertexForEdge(processVertex, Direction.OUT, RelationshipLabel.USER.getName(),
-                FALCON_USER, RelationshipType.USER.getName());
-        // verify edge to tags vertex
-        verifyVertexForEdge(processVertex, Direction.OUT, "classified-as",
-                "Critical", RelationshipType.TAGS.getName());
-
-        // verify edges to inputs
-        List<String> actual = new ArrayList<>();
-        for (Edge edge : processVertex.getEdges(Direction.IN,
-                RelationshipLabel.FEED_PROCESS_EDGE.getName())) {
-            Vertex outVertex = edge.getVertex(Direction.OUT);
-            Assert.assertEquals(RelationshipType.FEED_ENTITY.getName(),
-                    outVertex.getProperty(RelationshipProperty.TYPE.getName()));
-            actual.add(outVertex.<String>getProperty(RelationshipProperty.NAME.getName()));
-        }
-
-        Assert.assertTrue(actual.containsAll(Arrays.asList("impression-feed", "clicks-feed")),
-                "Actual does not contain expected: " + actual);
-
-        actual.clear();
-        // verify edges to outputs
-        for (Edge edge : processVertex.getEdges(Direction.OUT,
-                RelationshipLabel.PROCESS_FEED_EDGE.getName())) {
-            Vertex outVertex = edge.getVertex(Direction.IN);
-            Assert.assertEquals(RelationshipType.FEED_ENTITY.getName(),
-                    outVertex.getProperty(RelationshipProperty.TYPE.getName()));
-            actual.add(outVertex.<String>getProperty(RelationshipProperty.NAME.getName()));
-        }
-        Assert.assertTrue(actual.containsAll(Arrays.asList("imp-click-join1", "imp-click-join2")),
-                "Actual does not contain expected: " + actual);
-    }
-
-    private Vertex getEntityVertex(String entityName, RelationshipType entityType) {
-        GraphQuery entityQuery = getQuery()
-                .has(RelationshipProperty.NAME.getName(), entityName)
-                .has(RelationshipProperty.TYPE.getName(), entityType.getName());
-        Iterator<Vertex> iterator = entityQuery.vertices().iterator();
-        Assert.assertTrue(iterator.hasNext());
-
-        Vertex entityVertex = iterator.next();
-        Assert.assertNotNull(entityVertex);
-
-        return entityVertex;
-    }
-
-    private void verifyVertexForEdge(Vertex fromVertex, Direction direction, String label,
-                                     String expectedName, String expectedType) {
-        boolean found = false;
-        for (Edge edge : fromVertex.getEdges(direction, label)) {
-            found = true;
-            Vertex outVertex = edge.getVertex(Direction.IN);
-            Assert.assertEquals(
-                    outVertex.getProperty(RelationshipProperty.NAME.getName()), expectedName);
-            Assert.assertEquals(
-                    outVertex.getProperty(RelationshipProperty.TYPE.getName()), expectedType);
-        }
-        Assert.assertTrue(found, "Edge not found");
-    }
-
-    private void verifyEntityGraph(RelationshipType feedType, String classification) {
-        // feeds owned by a user
-        List<String> feedNamesOwnedByUser = getFeedsOwnedByAUser(feedType.getName());
-        Assert.assertEquals(feedNamesOwnedByUser,
-                Arrays.asList("impression-feed", "clicks-feed", "imp-click-join1",
-                        "imp-click-join2")
-        );
-
-        // feeds classified as secure
-        verifyFeedsClassifiedAsSecure(feedType.getName(),
-                Arrays.asList("impression-feed", "clicks-feed", "imp-click-join2"));
-
-        // feeds owned by a user and classified as secure
-        verifyFeedsOwnedByUserAndClassification(feedType.getName(), classification,
-                Arrays.asList("impression-feed", "clicks-feed", "imp-click-join2"));
-    }
-
-    private List<String> getFeedsOwnedByAUser(String feedType) {
-        GraphQuery userQuery = getQuery()
-                .has(RelationshipProperty.NAME.getName(), FALCON_USER)
-                .has(RelationshipProperty.TYPE.getName(), RelationshipType.USER.getName());
-
-        List<String> feedNames = new ArrayList<>();
-        for (Vertex userVertex : userQuery.vertices()) {
-            for (Vertex feed : userVertex.getVertices(Direction.IN, RelationshipLabel.USER.getName())) {
-                if (feed.getProperty(RelationshipProperty.TYPE.getName()).equals(feedType)) {
-                    System.out.println(FALCON_USER + " owns -> " + GraphUtils.vertexString(feed));
-                    feedNames.add(feed.<String>getProperty(RelationshipProperty.NAME.getName()));
-                }
-            }
-        }
-
-        return feedNames;
-    }
-
-    private void verifyFeedsClassifiedAsSecure(String feedType, List<String> expected) {
-        GraphQuery classQuery = getQuery()
-                .has(RelationshipProperty.NAME.getName(), "Secure")
-                .has(RelationshipProperty.TYPE.getName(), RelationshipType.TAGS.getName());
-
-        List<String> actual = new ArrayList<>();
-        for (Vertex feedVertex : classQuery.vertices()) {
-            for (Vertex feed : feedVertex.getVertices(Direction.BOTH, "classified-as")) {
-                if (feed.getProperty(RelationshipProperty.TYPE.getName()).equals(feedType)) {
-                    System.out.println(" Secure classification -> " + GraphUtils.vertexString(feed));
-                    actual.add(feed.<String>getProperty(RelationshipProperty.NAME.getName()));
-                }
-            }
-        }
-
-        Assert.assertTrue(actual.containsAll(expected), "Actual does not contain expected: " + actual);
-    }
-
-    private void verifyFeedsOwnedByUserAndClassification(String feedType, String classification,
-                                                         List<String> expected) {
-        List<String> actual = new ArrayList<>();
-        Vertex userVertex = getEntityVertex(FALCON_USER, RelationshipType.USER);
-        for (Vertex feed : userVertex.getVertices(Direction.IN, RelationshipLabel.USER.getName())) {
-            if (feed.getProperty(RelationshipProperty.TYPE.getName()).equals(feedType)) {
-                for (Vertex classVertex : feed.getVertices(Direction.OUT, "classified-as")) {
-                    if (classVertex.getProperty(RelationshipProperty.NAME.getName())
-                            .equals(classification)) {
-                        actual.add(feed.<String>getProperty(RelationshipProperty.NAME.getName()));
-                        System.out.println(classification + " feed owned by falcon-user -> "
-                                + GraphUtils.vertexString(feed));
-                    }
-                }
-            }
-        }
-        Assert.assertTrue(actual.containsAll(expected),
-                "Actual does not contain expected: " + actual);
-    }
-
-    public long getVerticesCount(final Graph graph) {
-        long count = 0;
-        for (Vertex ignored : graph.getVertices()) {
-            count++;
-        }
-
-        return count;
-    }
-
-    public long getEdgesCount(final Graph graph) {
-        long count = 0;
-        for (Edge ignored : graph.getEdges()) {
-            count++;
-        }
-
-        return count;
-    }
-
-    private void verifyLineageGraph(String feedType) {
-        List<String> expectedFeeds = Arrays.asList("impression-feed/2014-01-01T00:00Z", "clicks-feed/2014-01-01T00:00Z",
-                "imp-click-join1/2014-01-01T00:00Z", "imp-click-join2/2014-01-01T00:00Z");
-        List<String> secureFeeds = Arrays.asList("impression-feed/2014-01-01T00:00Z",
-                "clicks-feed/2014-01-01T00:00Z", "imp-click-join2/2014-01-01T00:00Z");
-        List<String> ownedAndSecureFeeds = Arrays.asList("clicks-feed/2014-01-01T00:00Z",
-                "imp-click-join1/2014-01-01T00:00Z", "imp-click-join2/2014-01-01T00:00Z");
-        verifyLineageGraph(feedType, expectedFeeds, secureFeeds, ownedAndSecureFeeds);
-    }
-
-    private void verifyLineageGraph(String feedType, List<String> expectedFeeds,
-                                    List<String> secureFeeds, List<String> ownedAndSecureFeeds) {
-        // feeds owned by a user
-        List<String> feedNamesOwnedByUser = getFeedsOwnedByAUser(feedType);
-        Assert.assertTrue(feedNamesOwnedByUser.containsAll(expectedFeeds));
-
-        Graph graph = service.getGraph();
-
-        Iterator<Vertex> vertices = graph.getVertices("name", "impression-feed/2014-01-01T00:00Z").iterator();
-        Assert.assertTrue(vertices.hasNext());
-        Vertex feedInstanceVertex = vertices.next();
-        Assert.assertEquals(feedInstanceVertex.getProperty(RelationshipProperty.TYPE.getName()),
-                RelationshipType.FEED_INSTANCE.getName());
-
-        Object vertexId = feedInstanceVertex.getId();
-        Vertex vertexById = graph.getVertex(vertexId);
-        Assert.assertEquals(vertexById, feedInstanceVertex);
-
-        // feeds classified as secure
-        verifyFeedsClassifiedAsSecure(feedType, secureFeeds);
-
-        // feeds owned by a user and classified as secure
-        verifyFeedsOwnedByUserAndClassification(feedType, "Financial", ownedAndSecureFeeds);
-    }
-
-    private void verifyLineageGraphForReplicationOrEviction(String feedName, String feedInstanceDataPath,
-                                                            WorkflowExecutionContext context,
-                                                            RelationshipLabel edgeLabel) throws Exception {
-        String feedInstanceName = InstanceRelationshipGraphBuilder.getFeedInstanceName(feedName
-                , context.getClusterName(), feedInstanceDataPath, context.getNominalTimeAsISO8601());
-        Vertex feedVertex = getEntityVertex(feedInstanceName, RelationshipType.FEED_INSTANCE);
-
-        Edge edge = feedVertex.getEdges(Direction.OUT, edgeLabel.getName())
-                .iterator().next();
-        Assert.assertNotNull(edge);
-        Assert.assertEquals(edge.getProperty(RelationshipProperty.TIMESTAMP.getName())
-                , context.getTimeStampAsISO8601());
-
-        Vertex clusterVertex = edge.getVertex(Direction.IN);
-        Assert.assertEquals(clusterVertex.getProperty(RelationshipProperty.NAME.getName()), context.getClusterName());
-    }
-
-    private void verifyLineageGraphForJobCounters(WorkflowExecutionContext context) throws Exception {
-        Vertex processVertex = getEntityVertex(PROCESS_ENTITY_NAME,
-                RelationshipType.PROCESS_ENTITY);
-        Assert.assertEquals(processVertex.getProperty("name"), PROCESS_ENTITY_NAME);
-        Assert.assertTrue(context.getCounters().length()>0);
-    }
-
-    private static String[] getTestMessageArgs(EntityOperations operation, String wfName, String outputFeedNames,
-                                               String feedInstancePaths, String falconInputPaths,
-                                               String falconInputFeeds) {
-        String cluster;
-        if (EntityOperations.REPLICATE == operation) {
-            cluster = BCP_CLUSTER_ENTITY_NAME + WorkflowExecutionContext.CLUSTER_NAME_SEPARATOR + CLUSTER_ENTITY_NAME;
-        } else {
-            cluster = CLUSTER_ENTITY_NAME;
-        }
-
-        return new String[]{
-            "-" + WorkflowExecutionArgs.CLUSTER_NAME.getName(), cluster,
-            "-" + WorkflowExecutionArgs.ENTITY_TYPE.getName(), ("process"),
-            "-" + WorkflowExecutionArgs.ENTITY_NAME.getName(), PROCESS_ENTITY_NAME,
-            "-" + WorkflowExecutionArgs.NOMINAL_TIME.getName(), NOMINAL_TIME,
-            "-" + WorkflowExecutionArgs.OPERATION.getName(), operation.toString(),
-
-            "-" + WorkflowExecutionArgs.INPUT_FEED_NAMES.getName(),
-            (falconInputFeeds != null ? falconInputFeeds : INPUT_FEED_NAMES),
-            "-" + WorkflowExecutionArgs.INPUT_FEED_PATHS.getName(),
-            (falconInputPaths != null ? falconInputPaths : INPUT_INSTANCE_PATHS),
-
-            "-" + WorkflowExecutionArgs.OUTPUT_FEED_NAMES.getName(),
-            (outputFeedNames != null ? outputFeedNames : OUTPUT_FEED_NAMES),
-            "-" + WorkflowExecutionArgs.OUTPUT_FEED_PATHS.getName(),
-            (feedInstancePaths != null ? feedInstancePaths : OUTPUT_INSTANCE_PATHS),
-
-            "-" + WorkflowExecutionArgs.WORKFLOW_ID.getName(), "workflow-01-00",
-            "-" + WorkflowExecutionArgs.WORKFLOW_USER.getName(), FALCON_USER,
-            "-" + WorkflowExecutionArgs.RUN_ID.getName(), "1",
-            "-" + WorkflowExecutionArgs.STATUS.getName(), "SUCCEEDED",
-            "-" + WorkflowExecutionArgs.TIMESTAMP.getName(), NOMINAL_TIME,
-
-            "-" + WorkflowExecutionArgs.WF_ENGINE_URL.getName(), "http://localhost:11000/oozie",
-            "-" + WorkflowExecutionArgs.USER_SUBFLOW_ID.getName(), "userflow@wf-id",
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_NAME.getName(), wfName,
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_VERSION.getName(), WORKFLOW_VERSION,
-            "-" + WorkflowExecutionArgs.USER_WORKFLOW_ENGINE.getName(), EngineType.PIG.name(),
-
-            "-" + WorkflowExecutionArgs.BRKR_IMPL_CLASS.getName(), BROKER,
-            "-" + WorkflowExecutionArgs.BRKR_URL.getName(), "tcp://localhost:61616?daemon=true",
-            "-" + WorkflowExecutionArgs.USER_BRKR_IMPL_CLASS.getName(), BROKER,
-            "-" + WorkflowExecutionArgs.USER_BRKR_URL.getName(), "tcp://localhost:61616?daemon=true",
-            "-" + WorkflowExecutionArgs.BRKR_TTL.getName(), "1000",
-
-            "-" + WorkflowExecutionArgs.LOG_DIR.getName(), LOGS_DIR,
-        };
-    }
-
-    private void setupForJobCounters() throws Exception {
-        cleanUp();
-        service.init();
-        // Add cluster
-        clusterEntity = addClusterEntity(CLUSTER_ENTITY_NAME, COLO_NAME,
-                "classification=production");
-        List<Feed> inFeeds = new ArrayList<>();
-        List<Feed> outFeeds = new ArrayList<>();
-
-        createJobCountersFileForTest();
-        // Add process
-        processEntity = addProcessEntity(PROCESS_ENTITY_NAME, clusterEntity,
-                "classified-as=Critical", "testPipeline,dataReplication_Pipeline", GENERATE_WORKFLOW_NAME,
-                WORKFLOW_VERSION, inFeeds, outFeeds);
-    }
-
-    private void createJobCountersFileForTest() throws Exception {
-        Path counterFile = new Path(LOGS_DIR, "counter.txt");
-        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(
-                new Path(LOGS_DIR).toUri());
-        // try-with-resources guarantees the stream is closed even if the write fails
-        try (OutputStream out = fs.create(counterFile)) {
-            out.write(COUNTERS.getBytes());
-            out.flush();
-        }
-    }
-
-    private void setup() throws Exception {
-        cleanUp();
-        service.init();
-
-        // Add cluster
-        clusterEntity = addClusterEntity(CLUSTER_ENTITY_NAME, COLO_NAME,
-                "classification=production");
-
-        addFeedsAndProcess(clusterEntity);
-    }
-
-    private void addFeedsAndProcess(Cluster cluster) throws Exception  {
-        // Add input and output feeds
-        Feed impressionsFeed = addFeedEntity("impression-feed", cluster,
-                "classified-as=Secure", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/impression-feed/${YEAR}/${MONTH}/${DAY}");
-        List<Feed> inFeeds = new ArrayList<>();
-        List<Feed> outFeeds = new ArrayList<>();
-        inFeeds.add(impressionsFeed);
-        Feed clicksFeed = addFeedEntity("clicks-feed", cluster,
-                "classified-as=Secure,classified-as=Financial", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/clicks-feed/${YEAR}-${MONTH}-${DAY}");
-        inFeeds.add(clicksFeed);
-        Feed join1Feed = addFeedEntity("imp-click-join1", cluster,
-                "classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join1/${YEAR}${MONTH}${DAY}");
-        outFeeds.add(join1Feed);
-        Feed join2Feed = addFeedEntity("imp-click-join2", cluster,
-                "classified-as=Secure,classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join2/${YEAR}${MONTH}${DAY}");
-        outFeeds.add(join2Feed);
-        processEntity = addProcessEntity(PROCESS_ENTITY_NAME, clusterEntity,
-                "classified-as=Critical", "testPipeline,dataReplication_Pipeline", GENERATE_WORKFLOW_NAME,
-                WORKFLOW_VERSION, inFeeds, outFeeds);
-    }
-
-    private void setupForLineageReplication() throws Exception {
-        cleanUp();
-        service.init();
-
-        List<Feed> inFeeds = new ArrayList<>();
-        List<Feed> outFeeds = new ArrayList<>();
-
-        addClusterAndFeedForReplication(inFeeds);
-
-        // Add output feed
-        Feed join1Feed = addFeedEntity("imp-click-join1", clusterEntity,
-                "classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join1/${YEAR}${MONTH}${DAY}");
-        outFeeds.add(join1Feed);
-
-        processEntity = addProcessEntity(PROCESS_ENTITY_NAME, clusterEntity,
-                "classified-as=Critical", "testPipeline,dataReplication_Pipeline", GENERATE_WORKFLOW_NAME,
-                WORKFLOW_VERSION, inFeeds, outFeeds);
-
-        // GENERATE WF should have run before this to create all instance related vertices
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                EntityOperations.GENERATE, GENERATE_WORKFLOW_NAME, "imp-click-join1",
-                "jail://global:00/falcon/imp-click-join1/20140101",
-                "jail://global:00/falcon/raw-click/primary/20140101",
-                REPLICATED_FEED), WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-    }
-
-    private void addClusterAndFeedForReplication(List<Feed> inFeeds) throws Exception {
-        // Add cluster
-        clusterEntity = addClusterEntity(CLUSTER_ENTITY_NAME, COLO_NAME,
-                "classification=production");
-        // Add backup cluster
-        Cluster bcpCluster = addClusterEntity(BCP_CLUSTER_ENTITY_NAME, "east-coast", "classification=bcp");
-
-        Cluster[] clusters = {clusterEntity, bcpCluster};
-
-        // Add feed
-        Feed rawFeed = addFeedEntity(REPLICATED_FEED, clusters,
-                "classified-as=Secure", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/raw-click/${YEAR}/${MONTH}/${DAY}");
-        // Add uri template for each cluster
-        for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : rawFeed.getClusters().getClusters()) {
-            if (feedCluster.getName().equals(CLUSTER_ENTITY_NAME)) {
-                addStorage(feedCluster, rawFeed, Storage.TYPE.FILESYSTEM,
-                        "/falcon/raw-click/primary/${YEAR}/${MONTH}/${DAY}");
-            } else {
-                addStorage(feedCluster, rawFeed, Storage.TYPE.FILESYSTEM,
-                        "/falcon/raw-click/bcp/${YEAR}/${MONTH}/${DAY}");
-            }
-        }
-
-        // update config store
-        try {
-            configStore.initiateUpdate(rawFeed);
-            configStore.update(EntityType.FEED, rawFeed);
-        } finally {
-            configStore.cleanupUpdateInit();
-        }
-        inFeeds.add(rawFeed);
-    }
-
-    private void setupForLineageEviction() throws Exception {
-        setup();
-
-        // GENERATE WF should have run before this to create all instance related vertices
-        WorkflowExecutionContext context = WorkflowExecutionContext.create(getTestMessageArgs(
-                        EntityOperations.GENERATE, GENERATE_WORKFLOW_NAME,
-                        "imp-click-join1,imp-click-join1", EVICTED_INSTANCE_PATHS, null, null),
-                WorkflowExecutionContext.Type.POST_PROCESSING);
-        service.onSuccess(context);
-    }
-
-    private void setupForNoDateInFeedPath() throws Exception {
-        cleanUp();
-        service.init();
-
-        // Add cluster
-        clusterEntity = addClusterEntity(CLUSTER_ENTITY_NAME, COLO_NAME,
-                "classification=production");
-        List<Feed> inFeeds = new ArrayList<>();
-        List<Feed> outFeeds = new ArrayList<>();
-        // Add input and output feeds
-        Feed impressionsFeed = addFeedEntity("impression-feed", clusterEntity,
-                "classified-as=Secure", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/impression-feed");
-        inFeeds.add(impressionsFeed);
-        Feed clicksFeed = addFeedEntity("clicks-feed", clusterEntity,
-                "classified-as=Secure,classified-as=Financial", "analytics", Storage.TYPE.FILESYSTEM,
-                "/falcon/clicks-feed");
-        inFeeds.add(clicksFeed);
-        Feed join1Feed = addFeedEntity("imp-click-join1", clusterEntity,
-                "classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join1");
-        outFeeds.add(join1Feed);
-        Feed join2Feed = addFeedEntity("imp-click-join2", clusterEntity,
-                "classified-as=Secure,classified-as=Financial", "reporting,bi", Storage.TYPE.FILESYSTEM,
-                "/falcon/imp-click-join2");
-        outFeeds.add(join2Feed);
-        processEntity = addProcessEntity(PROCESS_ENTITY_NAME, clusterEntity,
-                "classified-as=Critical", "testPipeline,dataReplication_Pipeline", GENERATE_WORKFLOW_NAME,
-                WORKFLOW_VERSION, inFeeds, outFeeds);
-
-    }
-
-    private void cleanUp() throws Exception {
-        cleanupGraphStore(service.getGraph());
-        cleanupConfigurationStore(configStore);
-        service.destroy();
-    }
-
-    private void cleanupGraphStore(Graph graph) {
-        for (Edge edge : graph.getEdges()) {
-            graph.removeEdge(edge);
-        }
-
-        for (Vertex vertex : graph.getVertices()) {
-            graph.removeVertex(vertex);
-        }
-
-        graph.shutdown();
-    }
-
-    private static void cleanupConfigurationStore(ConfigurationStore store) throws Exception {
-        for (EntityType type : EntityType.values()) {
-            Collection<String> entities = store.getEntities(type);
-            for (String entity : entities) {
-                store.remove(type, entity);
-            }
-        }
-    }
-}
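
The helper methods above walk the lineage graph through Blueprints property lookups and labelled
edges. Below is a minimal, self-contained sketch of the same traversal pattern using only
blueprints-core's in-memory TinkerGraph; the property keys "name"/"type", the vertex values and the
feed name are illustrative stand-ins, not Falcon's actual RelationshipProperty constants.

import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.blueprints.impls.tg.TinkerGraph;

public class ClassificationTraversalSketch {
    public static void main(String[] args) {
        Graph graph = new TinkerGraph();

        // A classification vertex and a feed-instance vertex, linked by a "classified-as" edge.
        Vertex secure = graph.addVertex(null);
        secure.setProperty("name", "Secure");
        secure.setProperty("type", "classification");

        Vertex feed = graph.addVertex(null);
        feed.setProperty("name", "clicks-feed/2014-01-01T00:00Z");
        feed.setProperty("type", "feed-instance");

        graph.addEdge(null, feed, secure, "classified-as");

        // Walk from the classification back to the feed instances it classifies.
        for (Vertex classified : secure.getVertices(Direction.BOTH, "classified-as")) {
            System.out.println("Secure classification -> " + classified.getProperty("name"));
        }
        graph.shutdown();
    }
}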

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/retention/EvictedInstanceSerDeTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/retention/EvictedInstanceSerDeTest.java b/common/src/test/java/org/apache/falcon/retention/EvictedInstanceSerDeTest.java
deleted file mode 100644
index 0f2ee7b..0000000
--- a/common/src/test/java/org/apache/falcon/retention/EvictedInstanceSerDeTest.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.retention;
-
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Unit test for EvictedInstanceSerDe.
- */
-public class EvictedInstanceSerDeTest {
-
-    private EmbeddedCluster cluster;
-    private FileSystem fs;
-    private Path csvFilePath;
-    private StringBuffer evictedInstancePaths = new StringBuffer(
-            "thrift://falcon-distcp-1.cs1cloud.internal:9083/default/retention_hours_7/year=2010")
-            .append(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR)
-            .append("thrift://falcon-distcp-1.cs1cloud.internal:9083/default/retention_hours_7/year=2011");
-
-    @BeforeClass
-    public void start() throws Exception {
-        cluster = EmbeddedCluster.newCluster("test");
-        String hdfsUrl = cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY);
-
-        fs = FileSystem.get(cluster.getConf());
-        csvFilePath = new Path(hdfsUrl + "/falcon/staging/feed/instancePaths-2014-10-01-01-00.csv");
-    }
-
-    @AfterClass
-    public void close() throws Exception {
-        cluster.shutdown();
-    }
-
-    @Test
-    public void testSerializeEvictedInstancePathsForNoEviction() throws Exception {
-        EvictedInstanceSerDe.serializeEvictedInstancePaths(fs, csvFilePath, new StringBuffer());
-
-        Assert.assertEquals(readLogFile(csvFilePath),
-                EvictedInstanceSerDe.INSTANCEPATH_PREFIX);
-    }
-
-    @Test
-    public void testSerializeEvictedInstancePathsWithEviction() throws Exception {
-        EvictedInstanceSerDe.serializeEvictedInstancePaths(fs, csvFilePath, evictedInstancePaths);
-        Assert.assertEquals(readLogFile(csvFilePath), evictedInstancePaths.toString());
-    }
-
-    @Test(dependsOnMethods = "testSerializeEvictedInstancePathsForNoEviction")
-    public void testDeserializeEvictedInstancePathsForNoEviction() throws Exception {
-        String[] instancePaths = EvictedInstanceSerDe.deserializeEvictedInstancePaths(fs, csvFilePath);
-        Assert.assertEquals(instancePaths.length, 0);
-    }
-
-    @Test(dependsOnMethods = "testSerializeEvictedInstancePathsWithEviction")
-    public void testDeserializeEvictedInstancePathsWithEviction() throws Exception {
-        String[] instancePaths = EvictedInstanceSerDe.deserializeEvictedInstancePaths(fs, csvFilePath);
-        Assert.assertEquals(instancePaths.length, 2);
-        Assert.assertTrue(instancePaths[0].equals(
-                "thrift://falcon-distcp-1.cs1cloud.internal:9083/default/retention_hours_7/year=2010"));
-        Assert.assertTrue(instancePaths[1].equals(
-                "thrift://falcon-distcp-1.cs1cloud.internal:9083/default/retention_hours_7/year=2011"));
-
-    }
-
-    private String readLogFile(Path logFile) throws IOException {
-        ByteArrayOutputStream writer = new ByteArrayOutputStream();
-        InputStream data = fs.open(logFile);
-        IOUtils.copyBytes(data, writer, 4096, true);
-        return writer.toString();
-    }
-}
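
For reference, the serialize/deserialize round trip exercised above can also be reproduced outside
the embedded cluster against a local FileSystem. A rough sketch, assuming falcon-common
(EvictedInstanceSerDe) and hadoop-common are on the classpath; the file path and thrift URIs are
placeholders.

import org.apache.falcon.retention.EvictedInstanceSerDe;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EvictedInstanceSerDeSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path csv = new Path("/tmp/instancePaths-2014-10-01-01-00.csv");

        StringBuffer evicted = new StringBuffer("thrift://metastore:9083/default/t/year=2010")
                .append(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR)
                .append("thrift://metastore:9083/default/t/year=2011");

        // Write the evicted instance paths to the csv file, then read them back.
        EvictedInstanceSerDe.serializeEvictedInstancePaths(fs, csv, evicted);
        String[] instancePaths = EvictedInstanceSerDe.deserializeEvictedInstancePaths(fs, csv);
        System.out.println(instancePaths.length + " instance paths deserialized");
    }
}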

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/security/AuthenticationInitializationServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/security/AuthenticationInitializationServiceTest.java b/common/src/test/java/org/apache/falcon/security/AuthenticationInitializationServiceTest.java
deleted file mode 100644
index 7979fe0..0000000
--- a/common/src/test/java/org/apache/falcon/security/AuthenticationInitializationServiceTest.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.File;
-
-
-/**
- * Unit test for AuthenticationInitializationService that employs mocks.
- */
-public class AuthenticationInitializationServiceTest {
-
-    private AuthenticationInitializationService authenticationService;
-
-    @Mock
-    private UserGroupInformation mockLoginUser;
-
-    @BeforeClass
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        authenticationService = new AuthenticationInitializationService();
-    }
-
-    @Test
-    public void testGetName() {
-        Assert.assertEquals("Authentication initialization service",
-                authenticationService.getName());
-    }
-
-    @Test
-    public void testInitForSimpleAuthenticationMethod() {
-        try {
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE,
-                    PseudoAuthenticationHandler.TYPE);
-            authenticationService.init();
-
-            UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
-            Assert.assertFalse(loginUser.isFromKeytab());
-            Assert.assertEquals(loginUser.getAuthenticationMethod().name().toLowerCase(),
-                    PseudoAuthenticationHandler.TYPE);
-            Assert.assertEquals(System.getProperty("user.name"), loginUser.getUserName());
-        } catch (Exception e) {
-            Assert.fail("AuthenticationInitializationService init failed.", e);
-        }
-    }
-
-    @Test
-    public void testKerberosAuthenticationWithKeytabFileDoesNotExist() {
-        try {
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE,
-                    KerberosAuthenticationHandler.TYPE);
-            StartupProperties.get().setProperty(AuthenticationInitializationService.KERBEROS_KEYTAB, "/blah/blah");
-            authenticationService.init();
-            Assert.fail("Expected 'The keytab file does not exist!' to be thrown.");
-        } catch (Exception e) {
-            Assert.assertEquals(e.getCause().getClass(), IllegalArgumentException.class);
-        }
-    }
-
-    @Test
-    public void testKerberosAuthenticationWithKeytabFileIsADirectory() {
-        try {
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE,
-                    KerberosAuthenticationHandler.TYPE);
-            StartupProperties.get().setProperty(AuthenticationInitializationService.KERBEROS_KEYTAB, "/tmp/");
-            authenticationService.init();
-            Assert.fail("Expected 'The keytab file cannot be a directory!' to be thrown.");
-        } catch (Exception e) {
-            Assert.assertEquals(e.getCause().getClass(), IllegalArgumentException.class);
-        }
-    }
-
-    @Test
-    public void testKerberosAuthenticationWithKeytabFileNotReadable() {
-        File tempFile = new File(".keytabFile");
-        try {
-            assert tempFile.createNewFile();
-            assert tempFile.setReadable(false);
-
-            StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE,
-                    KerberosAuthenticationHandler.TYPE);
-            StartupProperties.get().setProperty(
-                    AuthenticationInitializationService.KERBEROS_KEYTAB, tempFile.toString());
-            authenticationService.init();
-            Assert.fail("Expected 'The keytab file is not readable!' to be thrown.");
-        } catch (Exception e) {
-            Assert.assertEquals(e.getCause().getClass(), IllegalArgumentException.class);
-        } finally {
-            assert tempFile.delete();
-        }
-    }
-
-    @Test (enabled = false)
-    public void testInitForKerberosAuthenticationMethod() throws FalconException {
-        Mockito.when(mockLoginUser.getAuthenticationMethod())
-                .thenReturn(UserGroupInformation.AuthenticationMethod.KERBEROS);
-        Mockito.when(mockLoginUser.getUserName()).thenReturn(FalconTestUtil.TEST_USER_1);
-        Mockito.when(mockLoginUser.isFromKeytab()).thenReturn(Boolean.TRUE);
-
-        StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE,
-                KerberosAuthenticationHandler.TYPE);
-        StartupProperties.get().setProperty(
-                AuthenticationInitializationService.KERBEROS_KEYTAB, "falcon.kerberos.keytab");
-        StartupProperties.get().setProperty(AuthenticationInitializationService.KERBEROS_PRINCIPAL,
-                FalconTestUtil.TEST_USER_1);
-
-        authenticationService.init();
-
-        Assert.assertTrue(mockLoginUser.isFromKeytab());
-        Assert.assertEquals(mockLoginUser.getAuthenticationMethod().name(),
-                KerberosAuthenticationHandler.TYPE);
-        Assert.assertEquals(FalconTestUtil.TEST_USER_1, mockLoginUser.getUserName());
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/security/CurrentUserTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/security/CurrentUserTest.java b/common/src/test/java/org/apache/falcon/security/CurrentUserTest.java
deleted file mode 100644
index 5cc6c70..0000000
--- a/common/src/test/java/org/apache/falcon/security/CurrentUserTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.falcon.cluster.util.EntityBuilderTestUtil;
-import org.apache.falcon.service.GroupsService;
-import org.apache.falcon.service.ProxyUserService;
-import org.apache.falcon.service.Services;
-import org.apache.falcon.util.RuntimeProperties;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-/**
- * Test for current user's thread safety.
- */
-public class CurrentUserTest {
-    private ProxyUserService proxyUserService;
-    private GroupsService groupsService;
-
-    @BeforeClass
-    public void setUp() throws Exception {
-        Services.get().register(new ProxyUserService());
-        Services.get().register(new GroupsService());
-        groupsService = Services.get().getService(GroupsService.SERVICE_NAME);
-        proxyUserService = Services.get().getService(ProxyUserService.SERVICE_NAME);
-        groupsService.init();
-
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.hosts", "*");
-        RuntimeProperties.get().setProperty("falcon.service.ProxyUserService.proxyuser.foo.groups", "*");
-        proxyUserService.init();
-    }
-
-    @AfterClass
-    public void tearDown() throws Exception {
-        proxyUserService.destroy();
-        groupsService.destroy();
-        Services.get().reset();
-    }
-
-    @AfterMethod
-    public void cleanUp() {
-        CurrentUser.clear();
-    }
-
-    @Test(threadPoolSize = 10, invocationCount = 10, timeOut = 10000)
-    public void testGetUser() throws Exception {
-        String id = Long.toString(System.nanoTime());
-        CurrentUser.authenticate(id);
-        Assert.assertEquals(CurrentUser.getAuthenticatedUser(), id);
-        Assert.assertEquals(CurrentUser.getUser(), id);
-    }
-
-    @Test (expectedExceptions = IllegalStateException.class)
-    public void testAuthenticateBadUser() throws Exception {
-        CurrentUser.authenticate("");
-    }
-
-    @Test (expectedExceptions = IllegalStateException.class)
-    public void testGetAuthenticatedUserInvalid() throws Exception {
-        CurrentUser.getAuthenticatedUser();
-    }
-
-    @Test (expectedExceptions = IllegalStateException.class)
-    public void testGetUserInvalid() throws Exception {
-        CurrentUser.getUser();
-    }
-
-    @Test (expectedExceptions = IllegalStateException.class)
-    public void testProxyBadUser() throws Exception {
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_1);
-        CurrentUser.proxy("", "");
-    }
-
-    @Test (expectedExceptions = IllegalStateException.class)
-    public void testProxyWithNoAuth() throws Exception {
-        CurrentUser.proxy(FalconTestUtil.TEST_USER_1, "falcon");
-    }
-
-    @Test
-    public void testGetProxyUserForAuthenticatedUser() throws Exception {
-        CurrentUser.authenticate("proxy");
-        UserGroupInformation proxyUgi = CurrentUser.getProxyUGI();
-        Assert.assertNotNull(proxyUgi);
-        Assert.assertEquals(proxyUgi.getUserName(), "proxy");
-    }
-
-    @Test
-    public void testProxy() throws Exception {
-        CurrentUser.authenticate("real");
-
-        CurrentUser.proxy(EntityBuilderTestUtil.USER, "users");
-        UserGroupInformation proxyUgi = CurrentUser.getProxyUGI();
-        Assert.assertNotNull(proxyUgi);
-        Assert.assertEquals(proxyUgi.getUserName(), EntityBuilderTestUtil.USER);
-
-        Assert.assertEquals(CurrentUser.getAuthenticatedUser(), "real");
-        Assert.assertEquals(CurrentUser.getUser(), EntityBuilderTestUtil.USER);
-    }
-
-    @Test
-    public void testProxySameUser() throws Exception {
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_1);
-
-        CurrentUser.proxy(FalconTestUtil.TEST_USER_1, "users");
-        UserGroupInformation proxyUgi = CurrentUser.getProxyUGI();
-        Assert.assertNotNull(proxyUgi);
-        Assert.assertEquals(proxyUgi.getUserName(), FalconTestUtil.TEST_USER_1);
-
-        Assert.assertEquals(CurrentUser.getAuthenticatedUser(), FalconTestUtil.TEST_USER_1);
-        Assert.assertEquals(CurrentUser.getUser(), FalconTestUtil.TEST_USER_1);
-    }
-
-    @Test
-    public void testSuperUser() throws Exception {
-        CurrentUser.authenticate(EntityBuilderTestUtil.USER);
-        CurrentUser.proxy("proxy", "users");
-
-        UserGroupInformation proxyUgi = CurrentUser.getProxyUGI();
-        Assert.assertNotNull(proxyUgi);
-        Assert.assertEquals(proxyUgi.getUserName(), "proxy");
-
-        Assert.assertEquals(CurrentUser.getAuthenticatedUser(), EntityBuilderTestUtil.USER);
-        Assert.assertEquals(CurrentUser.getUser(), "proxy");
-    }
-
-    @Test(expectedExceptions = IllegalStateException.class)
-    public void testProxyDoAsUserWithNoAuth() throws Exception {
-        CurrentUser.proxyDoAsUser("falcon", "localhost");
-    }
-
-    @Test
-    public void testProxyDoAsUser() throws Exception {
-        CurrentUser.authenticate("foo");
-
-        CurrentUser.proxyDoAsUser(EntityBuilderTestUtil.USER, "localhost");
-        UserGroupInformation proxyUgi = CurrentUser.getProxyUGI();
-        Assert.assertNotNull(proxyUgi);
-        Assert.assertEquals(proxyUgi.getUserName(), EntityBuilderTestUtil.USER);
-
-        Assert.assertEquals(CurrentUser.getAuthenticatedUser(), "foo");
-        Assert.assertEquals(CurrentUser.getUser(), EntityBuilderTestUtil.USER);
-    }
-
-    @Test
-    public void testProxyDoAsSameUser() throws Exception {
-        CurrentUser.authenticate("foo");
-
-        CurrentUser.proxyDoAsUser("foo", "localhost");
-        UserGroupInformation proxyUgi = CurrentUser.getProxyUGI();
-        Assert.assertNotNull(proxyUgi);
-        Assert.assertEquals(proxyUgi.getUserName(), "foo");
-
-        Assert.assertEquals(CurrentUser.getAuthenticatedUser(), "foo");
-        Assert.assertEquals(CurrentUser.getUser(), "foo");
-    }
-}


[15/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VerticesResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VerticesResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VerticesResult.java
deleted file mode 100644
index df0fd2a..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/response/lineage/VerticesResult.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.response.lineage;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/** Class for Lineage API result having vertices. */
-public class VerticesResult extends GraphResult {
-    private List<Vertex> results;
-
-    public List<Vertex> getResults() {
-        return results;
-    }
-
-    @Override
-    public String toString() {
-        return String.format("VerticesResult{totalSize=%d, results=%s}", totalSize, results);
-    }
-
-    public List<Vertex> filterByType(Vertex.VERTEX_TYPE vertexType) {
-        return filterVerticesByType(vertexType, results);
-    }
-
-    public List<Vertex> filterVerticesByType(Vertex.VERTEX_TYPE vertexType,
-                                             List<Vertex> vertexList) {
-        List<Vertex> result = new ArrayList<>();
-        for (Vertex vertex : vertexList) {
-            if (vertex.getType() == vertexType) {
-                result.add(vertex);
-            }
-        }
-        return result;
-    }
-
-    public List<Vertex> filterByName(String name) {
-        return filterVerticesByName(name, results);
-    }
-
-    public List<Vertex> filterVerticesByName(String name, List<Vertex> vertexList) {
-        List<Vertex> result = new ArrayList<>();
-        for (Vertex vertex : vertexList) {
-            if (vertex.getName().equals(name)) {
-                result.add(vertex);
-            }
-        }
-        return result;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/Brother.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/Brother.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/Brother.java
deleted file mode 100755
index 4330bff..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/Brother.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.supportClasses;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.Util.URLS;
-import org.testng.TestNGException;
-import org.apache.log4j.Logger;
-
-/** Class for running a rest request in a parallel thread. */
-public class Brother extends Thread {
-    private String operation;
-    private String data;
-    private URLS url;
-    private ServiceResponse output;
-    private static final Logger LOGGER = Logger.getLogger(Brother.class);
-
-    public ServiceResponse getOutput() {
-        return output;
-    }
-
-    private AbstractEntityHelper entityManagerHelper;
-
-    public Brother(String threadName, String operation, EntityType entityType, ThreadGroup tGroup,
-                   Bundle b, ColoHelper p, URLS url) {
-        super(tGroup, threadName);
-        this.operation = operation;
-        switch (entityType) {
-        case PROCESS:
-            this.data = b.getProcessData();
-            this.entityManagerHelper = p.getProcessHelper();
-            break;
-        case CLUSTER:
-            this.entityManagerHelper = p.getClusterHelper();
-            this.data = b.getClusters().get(0);
-            break;
-        case FEED:
-            this.entityManagerHelper = p.getFeedHelper();
-            this.data = b.getDataSets().get(0);
-            break;
-        default:
-            LOGGER.error("Unexpected entityType=" + entityType);
-        }
-        this.url = url;
-        this.output = new ServiceResponse();
-    }
-
-    public void run() {
-        try {
-            sleep(50L);
-        } catch (Exception e) {
-            e.printStackTrace();
-            throw new TestNGException(e.getMessage());
-        }
-        LOGGER.info("Brother " + this.getName() + " will be executing " + operation);
-        try {
-            switch (url) {
-            case SUBMIT_URL:
-                output = entityManagerHelper.submitEntity(data);
-                break;
-            case GET_ENTITY_DEFINITION:
-                output = entityManagerHelper.getEntityDefinition(data);
-                break;
-            case DELETE_URL:
-                output = entityManagerHelper.delete(data);
-                break;
-            case SUSPEND_URL:
-                output = entityManagerHelper.suspend(data);
-                break;
-            case SCHEDULE_URL:
-                output = entityManagerHelper.schedule(data);
-                break;
-            case RESUME_URL:
-                output = entityManagerHelper.resume(data);
-                break;
-            case SUBMIT_AND_SCHEDULE_URL:
-                output = entityManagerHelper.submitAndSchedule(data);
-                break;
-            case STATUS_URL:
-                output = entityManagerHelper.getStatus(data);
-                break;
-            default:
-                LOGGER.error("Unexpected url: " + url);
-                break;
-            }
-            LOGGER.info("Brother " + getName() + "'s response to the "
-                + operation + " is: " + output);
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/ExecResult.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/ExecResult.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/ExecResult.java
deleted file mode 100644
index 8ce7342..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/ExecResult.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.supportClasses;
-
-import org.apache.commons.exec.CommandLine;
-
-/**
- * Class with result of command line execution.
- */
-public final class ExecResult {
-
-    private final int exitVal;
-    private final String output;
-    private final String error;
-    private final CommandLine commandLine;
-
-    public ExecResult(CommandLine commandLine, final int exitVal, final String output,
-                      final String error) {
-        this.exitVal = exitVal;
-        this.output = output;
-        this.error = error;
-        this.commandLine = commandLine;
-    }
-
-    public int getExitVal() {
-        return exitVal;
-    }
-
-    public boolean hasSuceeded() {
-        return exitVal == 0;
-    }
-
-    public String getOutput() {
-        return output;
-    }
-
-    public String getError() {
-        return error;
-    }
-
-    public CommandLine getCommandLine() {
-        return commandLine;
-    }
-
-    @Override
-    public String toString() {
-        return "ExecResult{"
-                + "exitVal=" + exitVal
-                + ", output='" + output + '\''
-                + ", error='" + error + '\''
-                + ", commandLine=" + commandLine
-                + '}';
-    }
-}
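
ExecResult is a plain value object; how it gets populated is up to the caller. A hedged sketch of
one way to fill it in with Apache commons-exec (the real helper in the regression suite may do this
differently):

import java.io.ByteArrayOutputStream;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.PumpStreamHandler;
import org.apache.falcon.regression.core.supportClasses.ExecResult;

public class ExecResultSketch {
    public static void main(String[] args) throws Exception {
        CommandLine commandLine = CommandLine.parse("echo hello");
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        ByteArrayOutputStream err = new ByteArrayOutputStream();

        DefaultExecutor executor = new DefaultExecutor();
        executor.setStreamHandler(new PumpStreamHandler(out, err));
        executor.setExitValues(null); // do not throw on non-zero exit codes
        int exitVal = executor.execute(commandLine);

        ExecResult result = new ExecResult(commandLine, exitVal,
                out.toString().trim(), err.toString().trim());
        System.out.println(result);
    }
}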

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/HadoopFileEditor.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/HadoopFileEditor.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/HadoopFileEditor.java
deleted file mode 100644
index 5ea765a..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/HadoopFileEditor.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.supportClasses;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
-
-import java.io.BufferedWriter;
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Class for simulating editing and restoring of a file in hdfs. */
-public class HadoopFileEditor {
-    private static final Logger LOGGER = Logger.getLogger(HadoopFileEditor.class);
-    private FileSystem fs;
-    private List<String> paths;
-    private List<String> files;
-
-    public HadoopFileEditor(FileSystem fs) {
-        this.fs = fs;
-        paths = new ArrayList<>();
-        files = new ArrayList<>();
-    }
-
-    /**
-     * Edits a file present on HDFS by inserting text after a marker line.
-     * @param path location on HDFS of the file to be edited
-     * @param putAfterString the text is inserted after the first line containing this string
-     * @param toBeInserted the text to be inserted
-     * @throws IOException
-     */
-    public void edit(String path, String putAfterString, String toBeInserted) throws IOException {
-        paths.add(path);
-        String currentFile = Util.getFileNameFromPath(path);
-        files.add(currentFile);
-        FileUtils.deleteQuietly(new File(currentFile));
-        FileUtils.deleteQuietly(new File("." + currentFile + ".crc"));
-        FileUtils.deleteQuietly(new File(currentFile + ".bck"));
-        FileUtils.deleteQuietly(new File("tmp"));
-
-        Path file = new Path(path);
-        //check if currentFile exists or not
-        if (fs.exists(file)) {
-            fs.copyToLocalFile(file, new Path(currentFile));
-            FileUtils.copyFile(new File(currentFile), new File(currentFile + ".bck"));
-            BufferedWriter bufWriter = new BufferedWriter(new FileWriter("tmp"));
-            BufferedReader br = new BufferedReader(new FileReader(currentFile));
-            String line;
-            boolean isInserted = false;
-            while ((line = br.readLine()) != null) {
-                bufWriter.write(line);
-                bufWriter.write('\n');
-                if (line.contains(putAfterString) && !isInserted) {
-                    bufWriter.write(toBeInserted);
-                    isInserted = true;
-                }
-            }
-            br.close();
-            bufWriter.close();
-            FileUtils.deleteQuietly(new File(currentFile));
-            FileUtils.copyFile(new File("tmp"), new File(currentFile));
-            FileUtils.deleteQuietly(new File("tmp"));
-
-            fs.delete(file, false);
-            File crcFile = new File("." + currentFile + ".crc");
-            if (crcFile.exists()) {
-                LOGGER.info("Result of delete on crc file " + crcFile + " : " + crcFile.delete());
-            }
-            fs.copyFromLocalFile(new Path(currentFile), file);
-        } else {
-            LOGGER.info("Nothing to do, " + currentFile + " does not exist");
-        }
-    }
-
-    /**
-     * Restore back the original file to HDFS that was edited by edit function.
-     * @throws IOException
-     */
-    public void restore() throws IOException {
-        for (int i = 0; i < paths.size(); i++) {
-            fs.delete(new Path(paths.get(i)), false);
-            FileUtils.deleteQuietly(new File(files.get(i)));
-            FileUtils.copyFile(new File(files.get(i) + ".bck"),
-                new File(files.get(i)));
-            fs.copyFromLocalFile(new Path(files.get(i)), new Path(paths.get(i)));
-            FileUtils.deleteQuietly(new File(files.get(i)));
-            FileUtils.deleteQuietly(new File(files.get(i) + ".bck"));
-        }
-    }
-}
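
A minimal usage sketch for the editor above: edit() keeps a local ".bck" copy of the file and
restore() puts the original back on HDFS. The file path and marker string below are placeholders,
and a reachable HDFS (the default FileSystem) is assumed.

import org.apache.falcon.regression.core.supportClasses.HadoopFileEditor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HadoopFileEditorSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        HadoopFileEditor editor = new HadoopFileEditor(fs);
        try {
            // Insert a comment right after the first line containing the marker string.
            editor.edit("/apps/falcon/conf/oozie-site.xml", "<configuration>",
                    "  <!-- injected by test -->\n");
        } finally {
            // Undo the edit so the cluster is left untouched.
            editor.restore();
        }
    }
}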

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/JmsMessageConsumer.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/JmsMessageConsumer.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/JmsMessageConsumer.java
deleted file mode 100644
index 5ce60cd..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/JmsMessageConsumer.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.supportClasses;
-
-import org.apache.activemq.ActiveMQConnectionFactory;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.log4j.Logger;
-
-import javax.jms.Connection;
-import javax.jms.Destination;
-import javax.jms.MapMessage;
-import javax.jms.Message;
-import javax.jms.MessageConsumer;
-import javax.jms.Session;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Collects JMS messages in a separate thread. */
-public class JmsMessageConsumer extends Thread {
-    /* Example broker URL of the JMS server: "tcp://host:61616?daemon=true"
-     * (or ActiveMQConnection.DEFAULT_BROKER_URL).
-     * Example name of the topic we will receive messages from: "FALCON.TOPIC". */
-
-    private static final Logger LOGGER = Logger.getLogger(JmsMessageConsumer.class);
-    private static final int MAX_MESSAGE_COUNT = 1000;
-
-    private final String brokerURL;
-    private final String topicName;
-    private final List<MapMessage> receivedMessages;
-
-    public List<MapMessage> getReceivedMessages() {
-        return receivedMessages;
-    }
-
-    public JmsMessageConsumer(String topicName, String brokerURL) {
-        super(topicName);
-        this.topicName = topicName;
-        this.brokerURL = brokerURL;
-        receivedMessages = new ArrayList<>();
-        setDaemon(true);
-    }
-
-    @Override
-    public void run() {
-        try {
-            // Getting JMS connection from the server
-            Connection connection = new ActiveMQConnectionFactory(brokerURL).createConnection();
-            connection.start();
-
-            // Creating session for sending messages
-            Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
-            Destination destination = session.createTopic(topicName);
-            MessageConsumer consumer = session.createConsumer(destination);
-
-            try {
-                LOGGER.info("Starting to receive messages.");
-                int count = 0;
-                for (; count < MAX_MESSAGE_COUNT; ++count) {
-                    Message message = consumer.receive(); //blocking call
-                    if (message == null) {
-                        LOGGER.info("Received empty message, count = " + count);
-                    } else {
-                        LOGGER.info("Received message, id = " + message.getJMSMessageID());
-                        receivedMessages.add((MapMessage) message);
-                    }
-                }
-                if (count >= MAX_MESSAGE_COUNT) {
-                    LOGGER.warn("Not reading more messages, already read " + count + " messages.");
-                }
-            } finally {
-                LOGGER.info("Done receiving messages, closing connection.");
-                connection.close();
-            }
-        } catch (Exception e) {
-            LOGGER.error("Caught exception: " + ExceptionUtils.getStackTrace(e));
-        }
-    }
-
-}
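
The consumer above is a daemon thread that blocks on receive(), so a test typically starts it,
triggers the action that publishes messages, and inspects getReceivedMessages() afterwards. A rough
usage sketch (broker URL, topic name and the fixed sleep are illustrative only):

import javax.jms.MapMessage;
import org.apache.falcon.regression.core.supportClasses.JmsMessageConsumer;

public class JmsMessageConsumerSketch {
    public static void main(String[] args) throws Exception {
        JmsMessageConsumer consumer =
                new JmsMessageConsumer("FALCON.ENTITY.TOPIC", "tcp://localhost:61616?daemon=true");
        consumer.start();

        // ... trigger the Falcon workflow / action that publishes JMS messages here ...

        Thread.sleep(30 * 1000); // crude wait; real tests poll or join with a timeout
        for (MapMessage message : consumer.getReceivedMessages()) {
            System.out.println("received message id = " + message.getJMSMessageID());
        }
    }
}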

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/NotifyingAssert.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/NotifyingAssert.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/NotifyingAssert.java
deleted file mode 100644
index 52b4fd3..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/supportClasses/NotifyingAssert.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.supportClasses;
-
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.log4j.Logger;
-import org.testng.asserts.IAssert;
-import org.testng.asserts.SoftAssert;
-import org.testng.collections.Maps;
-
-import java.util.Map;
-
-/**
- * NotifyingAssert: same as the SoftAssert provided by TestNG, with an additional option
- * of printing the stacktrace whenever an assertion fails.
- */
-public class NotifyingAssert extends SoftAssert {
-    private final boolean printFailures;
-    // LinkedHashMap to preserve the order
-    private Map<AssertionError, IAssert> mErrors = Maps.newLinkedHashMap();
-    private static final Logger LOGGER = Logger.getLogger(NotifyingAssert.class);
-
-    /**
-     * Same as SoftAssert - just adds an option for logging assertion failure stacktraces.
-     * @param logFailures - switches on printing of stacktrace in logs on failures.
-     */
-    public NotifyingAssert(boolean logFailures) {
-        this.printFailures = logFailures;
-    }
-
-    @Override
-    public void executeAssert(IAssert a) {
-        try {
-            a.doAssert();
-        } catch(AssertionError ex) {
-            onAssertFailure(a, ex);
-            mErrors.put(ex, a);
-            if (printFailures) {
-                LOGGER.info("Assertion failed - exception : " + ex + "\n"
-                    + ExceptionUtils.getStackTrace(ex));
-            }
-        }
-    }
-
-    public void assertAll() {
-        if (!mErrors.isEmpty()) {
-            StringBuilder sb = new StringBuilder("The following asserts failed:\n");
-            boolean first = true;
-            for (Map.Entry<AssertionError, IAssert> ae : mErrors.entrySet()) {
-                if (first) {
-                    first = false;
-                } else {
-                    sb.append(", ");
-                }
-                sb.append(ae.getValue().getMessage());
-            }
-            throw new AssertionError(sb.toString());
-        }
-    }
-}
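
For reference, a minimal sketch of how the NotifyingAssert helper above was typically consumed from a TestNG test. The test class and the assertions themselves are illustrative only, not taken from the deleted suite.

    import org.apache.falcon.regression.core.supportClasses.NotifyingAssert;
    import org.testng.annotations.Test;

    public class NotifyingAssertExampleTest {

        @Test
        public void collectsAllFailures() {
            // "true" switches on stacktrace logging for every failed assertion
            NotifyingAssert softAssert = new NotifyingAssert(true);
            softAssert.assertEquals("falcon", "falcon", "name check");
            softAssert.assertTrue("falcon".startsWith("f"), "prefix check");
            // throws a single AssertionError listing every failed assert, if any
            softAssert.assertAll();
        }
    }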

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/AssertUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/AssertUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/AssertUtil.java
deleted file mode 100644
index cb79e9c..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/AssertUtil.java
+++ /dev/null
@@ -1,519 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.supportClasses.ExecResult;
-import org.apache.falcon.resource.APIResult;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.json.JSONArray;
-import org.json.JSONObject;
-import org.testng.Assert;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-
-/**
- * Util methods for assert.
- */
-public final class AssertUtil {
-
-    private AssertUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    private static final Logger LOGGER = Logger.getLogger(AssertUtil.class);
-
-    /**
-     * Asserts correctness of CLI metrics for recipe based process or feed replication.
-     * @param execResult CLI metrics exec result to be checked
-     * @param entityName name of recipe process or replication feed
-     * @param instanceNum expected number of process/feed instances in metrics output
-     * @param withData is data expected to be replicated
-     * @throws Exception
-     */
-    public static void assertCLIMetrics(ExecResult execResult, String entityName, int instanceNum, boolean withData)
-        throws Exception {
-        String output = execResult.getOutput();
-        Assert.assertTrue(StringUtils.isNotBlank(output), "Exec result output is blank.");
-        JSONObject jsonObject = new JSONObject(output);
-        int totalSize = jsonObject.getInt("totalSize");
-        Assert.assertEquals(totalSize, instanceNum);
-        JSONArray array = jsonObject.getJSONArray("results");
-        for (int i = 0; i < array.length(); i++) {
-            String name = array.getJSONObject(i).getString("name");
-            Assert.assertTrue(name.contains(entityName));
-            int timeTaken = array.getJSONObject(i).getInt("TIMETAKEN");
-            Assert.assertTrue(timeTaken > 0, "TIMETAKEN metric should be greater than zero.");
-            int bytescopied = array.getJSONObject(i).getInt("BYTESCOPIED");
-            Assert.assertTrue(bytescopied >= 0, "BYTESCOPIED metric should be greater than or equal to zero.");
-            int copy = array.getJSONObject(i).getInt("COPY");
-            if (withData) {
-                Assert.assertTrue(copy > 0, "COPY metric should be greater than zero.");
-            } else {
-                Assert.assertEquals(copy, 0, "COPY metric should be equal to zero as data was absent.");
-            }
-        }
-    }
-
-    /**
-     * Checks that no path in the list contains any of the given strings.
-     *
-     * @param paths list of paths
-     * @param shouldNotBePresent strings that shouldn't be present
-     */
-    public static void failIfStringFoundInPath(
-        List<Path> paths, String... shouldNotBePresent) {
-        for (Path path : paths) {
-            for (String aShouldNotBePresent : shouldNotBePresent) {
-                if (path.toUri().toString().contains(aShouldNotBePresent)) {
-                    Assert.fail("String " + aShouldNotBePresent + " was not expected in path "
-                            +
-                            path.toUri().toString());
-                }
-            }
-        }
-    }
-
-    /**
-     * Checks that two lists have the same size.
-     *
-     * @param expected expected list
-     * @param actual   actual list
-     */
-    public static void checkForListSizes(List<?> expected, List<?> actual) {
-        if (expected.size() != actual.size()) {
-            LOGGER.info("expected = " + expected);
-        }
-        checkForListSize(actual, expected.size());
-    }
-
-    /**
-     * Checks that a list has the expected size.
-     *
-     * @param elements list of elements
-     * @param expectedSize expected size of the list
-     */
-    public static void checkForListSize(List<?> elements, int expectedSize) {
-        if (elements.size() != expectedSize) {
-            LOGGER.info("expectedSize = " + expectedSize);
-            LOGGER.info("elements.size() = " + elements.size());
-            LOGGER.info("elements = " + elements);
-        }
-        Assert.assertEquals(elements.size(), expectedSize,
-            "Size of expected and actual list don't match.");
-    }
-
-    /**
-     * Checks that two lists differ by the expected number of elements and that the diff contains the given filename.
-     *
-     * @param initialState first list
-     * @param finalState   second list
-     * @param filename     expected diff element
-     * @param expectedDiff diff count (positive for new elements)
-     */
-    public static void compareDataStoreStates(List<String> initialState,
-                                              List<String> finalState, String filename,
-                                              int expectedDiff) {
-
-        if (expectedDiff > -1) {
-            finalState.removeAll(initialState);
-            Assert.assertEquals(finalState.size(), expectedDiff);
-            if (expectedDiff != 0) {
-                Assert.assertTrue(finalState.get(0).contains(filename));
-            }
-        } else {
-            expectedDiff = expectedDiff * -1;
-            initialState.removeAll(finalState);
-            Assert.assertEquals(initialState.size(), expectedDiff);
-            if (expectedDiff != 0) {
-                Assert.assertTrue(initialState.get(0).contains(filename));
-            }
-        }
-
-
-    }
-
-    /**
-     * Checks that two lists differ by the expected number of elements.
-     *
-     * @param initialState first list
-     * @param finalState   second list
-     * @param expectedDiff diff count (positive for new elements)
-     */
-    public static void compareDataStoreStates(List<String> initialState,
-                                              List<String> finalState, int expectedDiff) {
-
-        if (expectedDiff > -1) {
-            finalState.removeAll(initialState);
-            Assert.assertEquals(finalState.size(), expectedDiff);
-
-        } else {
-            expectedDiff = expectedDiff * -1;
-            initialState.removeAll(finalState);
-            Assert.assertEquals(initialState.size(), expectedDiff);
-
-        }
-
-
-    }
-
-    /**
-     * Checks that ServiceResponse status is SUCCEEDED.
-     *
-     * @param response ServiceResponse
-     * @throws JAXBException
-     */
-    public static void assertSucceeded(ServiceResponse response) throws JAXBException {
-        final APIResult apiResult = Util.parseResponse(response);
-        Assert.assertNotNull(apiResult.getMessage(), "Status message is null");
-        Assert.assertEquals(apiResult.getStatus(), APIResult.Status.SUCCEEDED,
-            "Status should be SUCCEEDED. Message: " + apiResult.getMessage());
-        Assert.assertEquals(response.getCode(), 200,
-            "Status code should be 200. Message: " + apiResult.getMessage());
-    }
-
-    /**
-     * Checks that ServiceResponse status is SUCCEEDED.
-     *
-     * @param response ServiceResponse
-     * @return if the response was a success or not
-     */
-    public static boolean checkSucceeded(ServiceResponse response) {
-        final APIResult apiResult;
-        try {
-            apiResult = Util.parseResponse(response);
-        } catch (JAXBException e) {
-            return false;
-        }
-        return apiResult.getStatus() == APIResult.Status.SUCCEEDED
-            && response.getCode() == 200
-            && apiResult.getMessage() != null;
-    }
-
-    /**
-     * Checks that APIResult status is SUCCEEDED.
-     *
-     * @param response APIResult response
-     */
-    public static void assertSucceeded(APIResult response) {
-        Assert.assertNotNull(response.getMessage(), "Status message is null");
-        Assert.assertEquals(response.getStatus(), APIResult.Status.SUCCEEDED,
-            "Status should be SUCCEEDED. Message: " + response.getMessage());
-    }
-
-    /**
-     * Checks that ServiceResponse status is FAILED.
-     *
-     * @param response ServiceResponse
-     * @param message  message for exception
-     * @throws JAXBException
-     */
-    public static void assertFailed(final ServiceResponse response, final String message)
-        throws JAXBException {
-        assertFailedWithStatus(response, 400, message);
-    }
-
-    /**
-     * Assert that command executed unsuccessfully.
-     *
-     * @param execResult ExecResult of the command execution
-     * @param expectedMessage expected error message
-     */
-    public static void assertFailed(ExecResult execResult, String expectedMessage) {
-        Assert.assertFalse(execResult.hasSuceeded(),
-            "Unexpectedly succeeded execResult: " + execResult);
-        Assert.assertTrue((execResult.getError() + execResult.getOutput()).contains(expectedMessage),
-            "Expected error: " + expectedMessage + " in execResult: " + execResult);
-    }
-
-    /**
-     * Checks that ServiceResponse status is FAILED with the expected status code.
-     *
-     * @param response   ServiceResponse
-     * @param statusCode expected status code
-     * @param message    message for exception
-     * @throws JAXBException
-     */
-    public static void assertFailedWithStatus(final ServiceResponse response, final int statusCode,
-                                              final String message) throws JAXBException {
-        Assert.assertNotEquals(response.getMessage(), "null", "response message should not be null");
-        Assert.assertEquals(Util.parseResponse(response).getStatus(),
-            APIResult.Status.FAILED, message);
-        Assert.assertEquals(response.getCode(), statusCode, message);
-        Assert.assertNotNull(Util.parseResponse(response).getRequestId(), "RequestId is null");
-    }
-
-    /**
-     * Checks that ServiceResponse status is PARTIAL.
-     *
-     * @param response ServiceResponse
-     * @throws JAXBException
-     */
-    public static void assertPartial(ServiceResponse response) throws JAXBException {
-        Assert.assertEquals(Util.parseResponse(response).getStatus(), APIResult.Status.PARTIAL);
-        Assert.assertEquals(response.getCode(), 200);
-        Assert.assertNotNull(Util.parseResponse(response).getMessage());
-    }
-
-    /**
-     * Checks that ServiceResponse status is FAILED with status code 400.
-     *
-     * @param response ServiceResponse
-     * @throws JAXBException
-     */
-    public static void assertFailed(ServiceResponse response) throws JAXBException {
-        Assert.assertNotEquals(response.getMessage(), "null",
-            "response message should not be null");
-
-        Assert.assertEquals(Util.parseResponse(response).getStatus(), APIResult.Status.FAILED);
-        Assert.assertEquals(response.getCode(), 400);
-    }
-
-    /**
-     * Checks that ServiceResponse status is FAILED and the message contains expectedMessage.
-     *
-     * @param response ServiceResponse
-     * @param expectedMessage expected message
-     * @throws JAXBException
-     */
-    public static void assertFailedWithMessage(ServiceResponse response, String expectedMessage) throws JAXBException {
-        assertFailed(response);
-        Assert.assertTrue(response.getMessage().contains(expectedMessage), "Incorrect message in response");
-    }
-
-    /**
-     * Checks that Instance/Triage result status is FAILED.
-     *
-     * @param response APIResult response
-     */
-    public static void assertFailed(APIResult response) {
-        Assert.assertNotEquals(response.getMessage(), "null",
-            "response message should not be null");
-        Assert.assertEquals(response.getStatus(), APIResult.Status.FAILED,
-                "Status should be FAILED. Message: " + response.getMessage());
-    }
-
-    /**
-     * Checks that ServiceResponse status is FAILED with status code 403.
-     *
-     * @param response ServiceResponse
-     * @throws JAXBException
-     */
-    public static void assertFailedWith403(ServiceResponse response) throws JAXBException {
-        Assert.assertNotEquals(response.getMessage(), "null", "response message should not be null");
-
-        Assert.assertEquals(Util.parseResponse(response).getStatus(), APIResult.Status.FAILED);
-        Assert.assertEquals(response.getCode(), 403);
-    }
-    /**
-     * Checks that status of some entity job is equal to expected. Method can wait
-     * 100 seconds for expected status.
-     *
-     * @param oozieClient    OozieClient
-     * @param entityType     FEED or PROCESS
-     * @param data           feed or process XML
-     * @param expectedStatus expected Job.Status of entity
-     * @throws OozieClientException
-     */
-    public static void checkStatus(OozieClient oozieClient, EntityType entityType, String data,
-                                   Job.Status expectedStatus)
-        throws OozieClientException {
-        // feed and process names are read the same way
-        String name = Util.readEntityName(data);
-        Assert.assertEquals(
-            OozieUtil.verifyOozieJobStatus(oozieClient, name, entityType, expectedStatus), true,
-            "Status should be " + expectedStatus);
-    }
-
-    /**
-     * Checks that status of some entity job is equal to expected. Method can wait
-     * 100 seconds for expected status.
-     *
-     * @param oozieClient    OozieClient
-     * @param entityType     FEED or PROCESS
-     * @param bundle         Bundle with feed or process data
-     * @param expectedStatus expected Job.Status of entity
-     * @throws OozieClientException
-     */
-    public static void checkStatus(OozieClient oozieClient, EntityType entityType, Bundle bundle,
-                                   Job.Status expectedStatus)
-        throws OozieClientException {
-        String data = null;
-        if (entityType == EntityType.FEED) {
-            data = bundle.getDataSets().get(0);
-        } else if (entityType == EntityType.PROCESS) {
-            data = bundle.getProcessData();
-        }
-        checkStatus(oozieClient, entityType, data, expectedStatus);
-    }
-
-    /**
-     * Checks that status of some entity job is NOT equal to expected.
-     *
-     * @param oozieClient    OozieClient
-     * @param entityType     FEED or PROCESS
-     * @param data           feed or process XML
-     * @param expectedStatus expected Job.Status of entity
-     * @throws OozieClientException
-     */
-    public static void checkNotStatus(OozieClient oozieClient, EntityType entityType, String data,
-                                      Job.Status expectedStatus)
-        throws OozieClientException {
-        // feed and process names are read the same way
-        String processName = Util.readEntityName(data);
-        Assert.assertNotEquals(OozieUtil.getOozieJobStatus(oozieClient, processName,
-            entityType), expectedStatus, "Status should not be " + expectedStatus);
-    }
-
-    /**
-     * Checks that status of some entity job is NOT equal to expected.
-     *
-     * @param oozieClient    OozieClient
-     * @param entityType     FEED or PROCESS
-     * @param bundle         Bundle with feed or process data
-     * @param expectedStatus expected Job.Status of entity
-     * @throws OozieClientException
-     */
-    public static void checkNotStatus(OozieClient oozieClient, EntityType entityType,
-                                      Bundle bundle, Job.Status expectedStatus)
-        throws OozieClientException {
-        String data = null;
-        if (entityType == EntityType.FEED) {
-            data = bundle.getDataSets().get(0);
-        } else if (entityType == EntityType.PROCESS) {
-            data = bundle.getProcessData();
-        }
-        checkNotStatus(oozieClient, entityType, data, expectedStatus);
-    }
-
-    /**
-     * Checks that the content at two locations has the same size.
-     *
-     * @param firstPath  path to the first location
-     * @param secondPath path to the second location
-     * @param fs         hadoop file system for the locations
-     * @throws IOException
-     */
-    public static void checkContentSize(String firstPath, String secondPath, FileSystem fs) throws
-        IOException {
-        final ContentSummary firstSummary = fs.getContentSummary(new Path(firstPath));
-        final ContentSummary secondSummary = fs.getContentSummary(new Path(secondPath));
-        LOGGER.info(firstPath + " : firstSummary = " + firstSummary.toString(false));
-        LOGGER.info(secondPath + " : secondSummary = " + secondSummary.toString(false));
-        Assert.assertEquals(firstSummary.getLength(), secondSummary.getLength(),
-            "Contents at the two locations don't have same size.");
-    }
-
-    /**
-     * Fail the test because of the supplied exception.
-     * @param e exception
-     */
-    public static void fail(Exception e) {
-        LOGGER.info("Got exception: " + ExceptionUtils.getStackTrace(e));
-        Assert.fail("Failing because of exception.");
-    }
-
-    public static void assertEmpty(String str, String message) {
-        if (StringUtils.isNotEmpty(str)) {
-            Assert.fail(String.format("%s expected [empty string/null] found [%s]", message, str));
-        }
-    }
-
-    public static <E> void assertEmpty(Collection<E> collection, String message) {
-        if (!collection.isEmpty()) {
-            Assert.fail(
-                String.format("%s expected [empty collection] found [%s]", message, collection));
-        }
-    }
-    public static void assertNotEmpty(String str, String message) {
-        if (StringUtils.isEmpty(str)) {
-            Assert.fail(String.format("%s expected non-empty string found [%s]", message, str));
-        }
-    }
-
-    /**
-     * Checks whether job logs were copied to the user defined cluster staging path.
-     *
-     * @param logFlag     denotes whether it is a failed/succeeded log
-     * @param entityName  name of entity
-     * @param clusterFS   hadoop file system for the locations
-     * @param entityType  feed or process
-     * @return true if a log with the corresponding status was found, false otherwise
-     */
-    public static boolean assertPath(boolean logFlag, String entityName, FileSystem clusterFS,
-                                     String entityType) throws Exception {
-        String stagingDir = MerlinConstants.STAGING_LOCATION;
-        String path = stagingDir + "/falcon/workflows/" + entityType + "/" + entityName + "/logs";
-        List<Path> logmoverPaths = HadoopUtil
-                .getAllFilesRecursivelyHDFS(clusterFS, new Path(HadoopUtil.cutProtocol(path)));
-        String part = logFlag ? "SUCCEEDED" : "FAILED";
-        for (Path logmoverPath : logmoverPaths) {
-            if (logmoverPath.toString().contains(part)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Checks that job logs are copied to user defined cluster staging path.
-     *
-     * @param logFlag     denotes whether it is a failed/succeeded log
-     * @param entityName  name of entity
-     * @param clusterFS   hadoop file system for the locations
-     * @param entityType  feed or process
-     * @param message     message returned if assert fails
-     */
-    public static void assertLogMoverPath(boolean logFlag, String entityName, FileSystem clusterFS,
-                                          String entityType, String message) throws Exception {
-        Assert.assertTrue(assertPath(logFlag, entityName, clusterFS, entityType), message);
-    }
-
-    /**
-     * Checks that API Response status is FAILED.
-     *
-     * @param response APIResult
-     * @throws JAXBException
-     */
-    public static void assertFailedInstance(APIResult response) throws JAXBException {
-        Assert.assertEquals(response.getStatus(), APIResult.Status.FAILED,
-                "Status should be FAILED. Message: " + response.getMessage());
-        Assert.assertNotNull(response.getMessage(), "response message should not be null");
-    }
-}
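
A small illustration of the list and string helpers from AssertUtil above; the instance list and the test class are made up to show the intended call pattern only.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.falcon.regression.core.util.AssertUtil;
    import org.testng.annotations.Test;

    public class AssertUtilExampleTest {

        @Test
        public void listAndStringChecks() {
            List<String> instances = Arrays.asList("2016-01-01T00:00Z", "2016-01-01T01:00Z");
            // logs the list and fails if its size differs from the expectation
            AssertUtil.checkForListSize(instances, 2);
            // fails unless the string is null or empty
            AssertUtil.assertEmpty("", "stderr of the CLI call");
        }
    }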

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/BundleUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/BundleUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/BundleUtil.java
deleted file mode 100644
index 16ff6cb..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/BundleUtil.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Location;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-/**
- * Util methods related to bundles.
- */
-public final class BundleUtil {
-    private BundleUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(BundleUtil.class);
-
-    public static Bundle readFeedReplicationBundle() throws IOException {
-        return readBundleFromFolder("FeedReplicationBundles");
-    }
-
-    public static Bundle readLateDataBundle() throws IOException {
-        return readBundleFromFolder("LateDataBundles");
-    }
-
-    public static Bundle readRetryBundle() throws IOException {
-        return readBundleFromFolder("RetryTests");
-    }
-
-    public static Bundle readRetentionBundle() throws IOException {
-        return readBundleFromFolder("RetentionBundles");
-    }
-
-    public static Bundle readELBundle() throws IOException {
-        return readBundleFromFolder("ELbundle");
-    }
-
-    public static Bundle readHCatBundle() throws IOException {
-        return readBundleFromFolder("hcat");
-    }
-
-    public static Bundle readHCat2Bundle() throws IOException {
-        return readBundleFromFolder("hcat_2");
-    }
-
-    public static Bundle readUpdateBundle() throws IOException {
-        return readBundleFromFolder("updateBundle");
-    }
-
-    public static Bundle readCombinedActionsBundle() throws IOException {
-        return readBundleFromFolder("combinedActions");
-    }
-
-    private static Bundle readBundleFromFolder(final String folderPath) throws IOException {
-        LOGGER.info("Loading xmls from directory: " + folderPath);
-        File directory = null;
-        try {
-            directory = new File(BundleUtil.class.getResource("/" + folderPath).toURI());
-        } catch (URISyntaxException e) {
-            Assert.fail("could not find dir: " + folderPath);
-        }
-        final Collection<File> list = FileUtils.listFiles(directory, new String[] {"xml"}, true);
-        File[] files = list.toArray(new File[list.size()]);
-        Arrays.sort(files);
-        String clusterData = "";
-        final List<String> dataSets = new ArrayList<>();
-        String processData = "";
-
-        for (File file : files) {
-            LOGGER.info("Loading data from path: " + file.getAbsolutePath());
-            final String data = IOUtils.toString(file.toURI());
-
-            if (data.contains("uri:falcon:cluster:0.1")) {
-                LOGGER.info("data been added to cluster");
-                ClusterMerlin clusterMerlin = new ClusterMerlin(data);
-                //set ACL
-                clusterMerlin.setACL(MerlinConstants.CURRENT_USER_NAME,
-                        MerlinConstants.CURRENT_USER_GROUP, "*");
-                //set staging and working locations
-                clusterMerlin.getLocations().getLocations().clear();
-                final Location staging = new Location();
-                staging.setName(ClusterLocationType.STAGING);
-                staging.setPath(MerlinConstants.STAGING_LOCATION);
-                clusterMerlin.getLocations().getLocations().add(staging);
-                final Location working = new Location();
-                working.setName(ClusterLocationType.WORKING);
-                working.setPath(MerlinConstants.WORKING_LOCATION);
-                clusterMerlin.getLocations().getLocations().add(working);
-                final Location temp = new Location();
-                temp.setName(ClusterLocationType.TEMP);
-                temp.setPath(MerlinConstants.TEMP_LOCATION);
-                clusterMerlin.getLocations().getLocations().add(temp);
-                final String protectionPropName = "hadoop.rpc.protection";
-                final String protectionPropValue = Config.getProperty(protectionPropName);
-                if (StringUtils.isNotEmpty(protectionPropValue)) {
-                    clusterMerlin.withProperty(protectionPropName, protectionPropValue.trim());
-                }
-                clusterData = clusterMerlin.toString();
-            } else if (data.contains("uri:falcon:feed:0.1")) {
-                LOGGER.info("data been added to feed");
-                FeedMerlin feedMerlin = new FeedMerlin(data);
-                feedMerlin.setACL(MerlinConstants.CURRENT_USER_NAME,
-                        MerlinConstants.CURRENT_USER_GROUP, "*");
-                dataSets.add(feedMerlin.toString());
-            } else if (data.contains("uri:falcon:process:0.1")) {
-                LOGGER.info("data been added to process");
-                ProcessMerlin processMerlin = new ProcessMerlin(data);
-                processMerlin.setACL(MerlinConstants.CURRENT_USER_NAME,
-                        MerlinConstants.CURRENT_USER_GROUP, "*");
-                processData = processMerlin.toString();
-            }
-        }
-        Assert.assertTrue(StringUtils.isNotEmpty(clusterData), "expecting cluster data to be non-empty");
-        Assert.assertFalse(dataSets.isEmpty(), "expecting feed data to be non-empty");
-        return new Bundle(clusterData, dataSets, processData);
-    }
-
-    public static void submitAllClusters(ColoHelper prismHelper, Bundle... b)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        for (Bundle aB : b) {
-            ServiceResponse r = prismHelper.getClusterHelper().submitEntity(aB.getClusters().get(0));
-            Assert.assertTrue(r.getMessage().contains("SUCCEEDED"));
-
-        }
-    }
-
-    /**
-     * Configures cluster definition according to provided properties.
-     * @param cluster cluster which should be configured
-     * @param prefix current cluster prefix
-     * @return modified cluster definition
-     */
-    public static ClusterMerlin getEnvClusterXML(String cluster, String prefix) {
-        ClusterMerlin clusterObject = new ClusterMerlin(cluster);
-        if ((null == prefix) || prefix.isEmpty()) {
-            prefix = "";
-        } else {
-            prefix = prefix + ".";
-        }
-        String hcatEndpoint = Config.getProperty(prefix + "hcat_endpoint");
-
-        //now read and set relevant values
-        for (Interface iface : clusterObject.getInterfaces().getInterfaces()) {
-            if (iface.getType() == Interfacetype.READONLY) {
-                iface.setEndpoint(Config.getProperty(prefix + "cluster_readonly"));
-            } else if (iface.getType() == Interfacetype.WRITE) {
-                iface.setEndpoint(Config.getProperty(prefix + "cluster_write"));
-            } else if (iface.getType() == Interfacetype.EXECUTE) {
-                iface.setEndpoint(Config.getProperty(prefix + "cluster_execute"));
-            } else if (iface.getType() == Interfacetype.WORKFLOW) {
-                iface.setEndpoint(Config.getProperty(prefix + "oozie_url"));
-            } else if (iface.getType() == Interfacetype.MESSAGING) {
-                iface.setEndpoint(Config.getProperty(prefix + "activemq_url"));
-            } else if (iface.getType() == Interfacetype.REGISTRY) {
-                iface.setEndpoint(hcatEndpoint);
-            }
-        }
-        //set colo name:
-        clusterObject.setColo(Config.getProperty(prefix + "colo"));
-        // properties in the cluster needed when secure mode is on
-        if (MerlinConstants.IS_SECURE) {
-            // add the namenode principal to the properties object
-            clusterObject.withProperty("dfs.namenode.kerberos.principal",
-                Config.getProperty(prefix + "namenode.kerberos.principal", "none"));
-
-            // add the hive meta store principal to the properties object
-            clusterObject.withProperty("hive.metastore.kerberos.principal",
-                Config.getProperty(prefix + "hive.metastore.kerberos.principal", "none"));
-
-            // Until oozie has better integration with secure hive we need to send the properties to
-            // falcon.
-            // hive.metastore.sasl.enabled = true
-            clusterObject.withProperty("hive.metastore.sasl.enabled", "true");
-        }
-        // Only set the metastore uri if it's not empty or null.
-        String hiveMetastoreUris = Config.getProperty(prefix + "hive.metastore.uris");
-        if (StringUtils.isNotBlank(hiveMetastoreUris)) {
-            //hive.metastore.uris
-            clusterObject.withProperty("hive.metastore.uris", hiveMetastoreUris);
-        }
-        String hiveServer2Uri = Config.getProperty(prefix + "hive.server2.uri");
-        if (StringUtils.isNotBlank(hiveServer2Uri)) {
-            //hive.server2.uri
-            clusterObject.withProperty("hive.server2.uri", hiveServer2Uri);
-        }
-        return clusterObject;
-    }
-
-    public static List<ClusterMerlin> getClustersFromStrings(List<String> clusterStrings) {
-        List<ClusterMerlin> clusters = new ArrayList<>();
-        for (String clusterString : clusterStrings) {
-            clusters.add(new ClusterMerlin(clusterString));
-        }
-        return clusters;
-    }
-}
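
A sketch of the usual read-and-configure flow offered by BundleUtil above, assuming the ELbundle resources and Merlin.properties are on the classpath; the "cluster1" property prefix is an assumption, not taken from this commit.

    import org.apache.falcon.regression.Entities.ClusterMerlin;
    import org.apache.falcon.regression.core.bundle.Bundle;
    import org.apache.falcon.regression.core.util.BundleUtil;

    public class BundleUtilExample {

        public static void main(String[] args) throws Exception {
            // loads the cluster/feed/process XMLs packaged under the ELbundle test resources
            Bundle bundle = BundleUtil.readELBundle();
            // rewrites the first cluster's interfaces from the "cluster1."-prefixed
            // properties in Merlin.properties (the prefix name is an assumption)
            ClusterMerlin cluster = BundleUtil.getEnvClusterXML(bundle.getClusters().get(0), "cluster1");
            System.out.println(cluster);
        }
    }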

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/CleanupUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/CleanupUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/CleanupUtil.java
deleted file mode 100644
index 225bc5b..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/CleanupUtil.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.resource.EntityList;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Logger;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import java.io.IOException;
-import java.io.StringReader;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Util methods for cleaning up entities.
- */
-public final class CleanupUtil {
-    private CleanupUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(CleanupUtil.class);
-
-    public static List<String> getAllEntitiesOfOneType(AbstractEntityHelper entityManagerHelper,
-                                                       String user) {
-        return getEntitiesWithPrefix(entityManagerHelper, user, "");
-    }
-
-    public static List<String> getEntitiesWithPrefix(AbstractEntityHelper entityHelper,
-                                                       String user, String namePrefix) {
-        final EntityList entityList;
-        try {
-            entityList = getEntitiesResultOfOneType(entityHelper, user);
-        } catch (Exception e) {
-            LOGGER.error("Caught exception: " + ExceptionUtils.getStackTrace(e));
-            return null;
-        }
-        List<String> entities = new ArrayList<>();
-        if (entityList.getElements() != null) {
-            for (EntityList.EntityElement entity : entityList.getElements()) {
-                if (entity.name.startsWith(namePrefix)) {
-                    entities.add(entity.name);
-                }
-            }
-        }
-        return entities;
-    }
-
-    private static EntityList getEntitiesResultOfOneType(
-        AbstractEntityHelper entityManagerHelper, String user)
-        throws IOException, URISyntaxException, AuthenticationException, JAXBException,
-        InterruptedException {
-        final ServiceResponse clusterResponse = entityManagerHelper.listAllEntities(null, user);
-        JAXBContext jc = JAXBContext.newInstance(EntityList.class);
-        Unmarshaller u = jc.createUnmarshaller();
-        return (EntityList) u.unmarshal(
-            new StringReader(clusterResponse.getMessage()));
-    }
-
-    public static void cleanEntitiesWithPrefix(ColoHelper prism, String namePrefix, String user) {
-        final List<String> processes = getEntitiesWithPrefix(prism.getProcessHelper(), user, namePrefix);
-        final List<String> feeds = getEntitiesWithPrefix(prism.getFeedHelper(), user, namePrefix);
-        final List<String> clusters = getEntitiesWithPrefix(prism.getClusterHelper(), user, namePrefix);
-
-        for (String process : processes) {
-            deleteQuietlyByName(prism.getProcessHelper(), process, user);
-        }
-        for (String feed : feeds) {
-            deleteQuietlyByName(prism.getFeedHelper(), feed, user);
-        }
-
-        for (String cluster : clusters) {
-            deleteQuietlyByName(prism.getClusterHelper(), cluster, user);
-        }
-    }
-
-    private static void deleteQuietlyByName(AbstractEntityHelper helper, String entityName, String user) {
-        try {
-            helper.deleteByName(entityName, user);
-        } catch (Exception e) {
-            LOGGER.info("Caught exception: " + ExceptionUtils.getStackTrace(e));
-        }
-    }
-}
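
A sketch of how the cleanup helper above was normally wired into test teardown; constructing the ColoHelper from a "prism" property prefix and the entity-name prefix are assumptions made for illustration.

    import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
    import org.apache.falcon.regression.core.helpers.ColoHelper;
    import org.apache.falcon.regression.core.util.CleanupUtil;
    import org.testng.annotations.AfterClass;

    public class CleanupExampleTest {

        // assumption: the prism endpoints are configured under the "prism" prefix in Merlin.properties
        private final ColoHelper prism = new ColoHelper("prism");

        @AfterClass(alwaysRun = true)
        public void tearDown() {
            // deletes every process, feed and cluster whose name starts with the given prefix
            CleanupUtil.cleanEntitiesWithPrefix(prism, "ACleanupExample", MerlinConstants.CURRENT_USER_NAME);
        }
    }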

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Config.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Config.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Config.java
deleted file mode 100644
index ba509e4..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Config.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.configuration.AbstractConfiguration;
-import org.apache.commons.configuration.CompositeConfiguration;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.commons.configuration.PropertiesConfiguration;
-import org.apache.commons.configuration.SystemConfiguration;
-import org.apache.commons.configuration.reloading.FileChangedReloadingStrategy;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-/** Class for reading properties from Merlin.properties file. */
-public final class Config {
-    private static final Logger LOGGER = Logger.getLogger(Config.class);
-
-    private static final String MERLIN_PROPERTIES = "Merlin.properties";
-    private static final Config INSTANCE = new Config(MERLIN_PROPERTIES);
-
-    private AbstractConfiguration confObj;
-    private Config(String propFileName) {
-        try {
-            initConfig(propFileName);
-        } catch (ConfigurationException e) {
-            Assert.fail("Could not read properties because of exception: " + e);
-        }
-    }
-
-    private void initConfig(String propFileName) throws ConfigurationException {
-        CompositeConfiguration compositeConfiguration = new CompositeConfiguration();
-        LOGGER.info("Going to add properties from system properties.");
-        compositeConfiguration.addConfiguration(new SystemConfiguration());
-
-        LOGGER.info("Going to read properties from: " + propFileName);
-        final PropertiesConfiguration merlinConfig =
-            new PropertiesConfiguration(Config.class.getResource("/" + propFileName));
-        // if changed, the configuration will be reloaded within 2 minutes
-        final FileChangedReloadingStrategy reloadingStrategy = new FileChangedReloadingStrategy();
-        reloadingStrategy.setRefreshDelay(2 * 60 * 1000);
-        merlinConfig.setReloadingStrategy(reloadingStrategy);
-        compositeConfiguration.addConfiguration(merlinConfig);
-        this.confObj = compositeConfiguration;
-    }
-
-    public static String getProperty(String key) {
-        return INSTANCE.confObj.getString(key);
-    }
-
-    public static String[] getStringArray(String key) {
-        return INSTANCE.confObj.getStringArray(key);
-    }
-
-    public static String getProperty(String key, String defaultValue) {
-        return INSTANCE.confObj.getString(key, defaultValue);
-    }
-
-    public static boolean getBoolean(String key, boolean defaultValue) {
-        return INSTANCE.confObj.getBoolean(key, defaultValue);
-    }
-
-    public static int getInt(String key, int defaultValue) {
-        return INSTANCE.confObj.getInt(key, defaultValue);
-    }
-}
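
A short sketch of reading test configuration through the Config facade above. It assumes Merlin.properties is on the classpath; only the oozie_url key appears in this commit, the other keys and defaults are illustrative.

    import org.apache.falcon.regression.core.util.Config;

    public class ConfigExample {

        public static void main(String[] args) {
            // falls back to the supplied default when the key is absent
            String oozieUrl = Config.getProperty("oozie_url", "http://localhost:11000/oozie");
            boolean secure = Config.getBoolean("isSecure", false);
            int retries = Config.getInt("retry.count", 3);
            System.out.println(oozieUrl + ", secure=" + secure + ", retries=" + retries);
        }
    }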

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/EntityLineageUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/EntityLineageUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/EntityLineageUtil.java
deleted file mode 100644
index 3b6314f..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/EntityLineageUtil.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.resource.LineageGraphResult;
-import org.apache.falcon.resource.TriageResult;
-import org.apache.log4j.Logger;
-import org.joda.time.DateTime;
-import org.testng.Assert;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * Util functions related to entity lineage.
- */
-public final class EntityLineageUtil{
-
-    private static final Logger LOGGER = Logger.getLogger(EntityLineageUtil.class);
-
-    /**
-     * Enum to represent entity role in pipeline.
-     */
-    public enum PipelineEntityType {
-        PROCESS, INPUT_FEED, OUTPUT_FEED
-    }
-
-    private EntityLineageUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    /**
-     * Validates entity lineage results.
-     * @param lineageGraphResult entity lineage result
-     * @param expectedVertices array of expected vertices
-     * @param expectedEdgeArray array of expected edges
-     */
-    public static void validateLineageGraphResult(LineageGraphResult lineageGraphResult, String[] expectedVertices,
-                                                  LineageGraphResult.Edge[] expectedEdgeArray) {
-        String[] actualVertices;
-        LineageGraphResult.Edge[] actualEdgeArray;
-        Set<String> actualVerticesSet = new HashSet<>();
-        Set<LineageGraphResult.Edge> actualEdgeSet = new HashSet<>();
-
-        try {
-            actualVertices = lineageGraphResult.getVertices();
-            actualVerticesSet = new HashSet<>(Arrays.asList(actualVertices));
-        } catch (NullPointerException e) {
-            Assert.assertEquals(expectedVertices.length, 0);
-        }
-        try {
-            actualEdgeArray = lineageGraphResult.getEdges();
-            actualEdgeSet = new HashSet<>(Arrays.asList(actualEdgeArray));
-        } catch (NullPointerException e) {
-            Assert.assertEquals(expectedEdgeArray.length, 0);
-        }
-
-        Set<LineageGraphResult.Edge> expectedEdgeSet = new HashSet<>(Arrays.asList(expectedEdgeArray));
-        Set<String> expectedVerticesSet = new HashSet<>(Arrays.asList(expectedVertices));
-
-        Assert.assertEquals(actualEdgeSet, expectedEdgeSet, "Edges dont match");
-        Assert.assertEquals(actualVerticesSet, expectedVerticesSet, "Vertices dont match");
-    }
-
-    /**
-     * Validates that failed response contains specific error message.
-     * @param triageResult response
-     * @param error expected error
-     */
-    public static void validateError(TriageResult triageResult, ResponseErrors error) {
-        AssertUtil.assertFailed(triageResult);
-        Assert.assertTrue(triageResult.getMessage().contains(error.getError()),
-                "Error should contain '" + error + "'");
-    }
-
-    /**
-     * Produces list of expected vertices and edges in triage result.
-     */
-    public static LineageGraphResult getExpectedResult(int bundleIndx,
-                                                       Map<PipelineEntityType, List<String>> entityNamesMap,
-                                                       List<Integer> inputFeedFrequencies, String entityName,
-                                                       String clusterName, String startTime) {
-        List<String> processNames = entityNamesMap.get(PipelineEntityType.PROCESS);
-        List<String> inputFeedNames = entityNamesMap.get(PipelineEntityType.INPUT_FEED);
-        List<String> outputFeedNames = entityNamesMap.get(PipelineEntityType.OUTPUT_FEED);
-        List<String> vertices = new ArrayList<>();
-        List<LineageGraphResult.Edge> edges = new ArrayList<>();
-        final String startTimeMinus20 = TimeUtil.addMinsToTime(startTime, -20);
-        String vertexTemplate = "name: %s, type: %s, cluster: %s, instanceTime: %s, tags: %s";
-        for (int i = 0; i <= bundleIndx; ++i) {
-            //add vertex of i-th bundle process
-            boolean isTerminalInstance = processNames.contains(entityName) && i == bundleIndx;
-            String tag = isTerminalInstance ? "[WAITING]" : "Output[WAITING]";
-            final String processVertex = String.format(vertexTemplate,
-                processNames.get(i), "PROCESS", clusterName, startTime, tag);
-            vertices.add(processVertex);
-
-            //add all input feed vertices & edges for i-th bundle
-            LineageGraphResult.Edge edge;
-            String feedVertex;
-            for (DateTime dt = new DateTime(startTime); !dt.isBefore(new DateTime(startTimeMinus20));
-                 dt = dt.minusMinutes(inputFeedFrequencies.get(i))) {
-                feedVertex = String.format(vertexTemplate, inputFeedNames.get(i), "FEED",
-                    clusterName, TimeUtil.dateToOozieDate(dt.toDate()), "Input[MISSING]");
-                edge = new LineageGraphResult.Edge(feedVertex, processVertex, "consumed by");
-                vertices.add(feedVertex);
-                edges.add(edge);
-            }
-            //add output feed edge for i-th bundle
-            tag = (outputFeedNames.contains(entityName) && i == bundleIndx) ? "[MISSING]" : "Input[MISSING]";
-            feedVertex = String.format(vertexTemplate, outputFeedNames.get(i), "FEED", clusterName, startTime, tag);
-            isTerminalInstance = i == bundleIndx && outputFeedNames.contains(entityName);
-            if (i < bundleIndx || isTerminalInstance) {
-                edge = new LineageGraphResult.Edge(processVertex, feedVertex, "produces");
-                edges.add(edge);
-            }
-            //add output feed vertex only if it is terminal; it will be added as the input for next bundle otherwise
-            if (isTerminalInstance) {
-                vertices.add(feedVertex);
-            }
-        }
-        LineageGraphResult lineageGraphResult = new LineageGraphResult();
-        lineageGraphResult.setVertices(vertices.toArray(new String[vertices.size()]));
-        lineageGraphResult.setEdges(edges.toArray(new LineageGraphResult.Edge[edges.size()]));
-        return lineageGraphResult;
-    }
-
-}
-
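
A sketch of validating a lineage graph with the helper above; the vertex strings and the single "consumed by" edge are fabricated to show the expected-versus-actual comparison, not taken from a real server response.

    import org.apache.falcon.regression.core.util.EntityLineageUtil;
    import org.apache.falcon.resource.LineageGraphResult;

    public class LineageValidationExample {

        public static void main(String[] args) {
            String feedVertex = "name: rawInput, type: FEED, cluster: corp, "
                + "instanceTime: 2016-01-01T00:00Z, tags: Input[MISSING]";
            String processVertex = "name: aggregator, type: PROCESS, cluster: corp, "
                + "instanceTime: 2016-01-01T00:00Z, tags: [WAITING]";
            LineageGraphResult.Edge consumedBy =
                new LineageGraphResult.Edge(feedVertex, processVertex, "consumed by");

            LineageGraphResult actual = new LineageGraphResult();
            actual.setVertices(new String[]{feedVertex, processVertex});
            actual.setEdges(new LineageGraphResult.Edge[]{consumedBy});

            // passes only when the actual and expected vertices/edges match as sets
            EntityLineageUtil.validateLineageGraphResult(actual,
                new String[]{feedVertex, processVertex},
                new LineageGraphResult.Edge[]{consumedBy});
        }
    }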

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/ExecUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/ExecUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/ExecUtil.java
deleted file mode 100644
index a792f62..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/ExecUtil.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import com.jcraft.jsch.ChannelExec;
-import com.jcraft.jsch.JSch;
-import com.jcraft.jsch.JSchException;
-import com.jcraft.jsch.Session;
-import com.jcraft.jsch.UserInfo;
-import org.apache.commons.exec.CommandLine;
-import org.apache.commons.exec.DefaultExecutor;
-import org.apache.commons.exec.ExecuteWatchdog;
-import org.apache.commons.exec.PumpStreamHandler;
-import org.apache.commons.io.IOUtils;
-import org.apache.falcon.regression.core.supportClasses.ExecResult;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-import java.io.BufferedReader;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-/**
- * util methods related to exec.
- */
-public final class ExecUtil {
-    private ExecUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(ExecUtil.class);
-
-    static List<String> runRemoteScriptAsSudo(final String hostName, final String userName,
-                                              final String password, final String command,
-                                              final String runAs, final String identityFile) throws
-        JSchException, IOException {
-        JSch jsch = new JSch();
-        Session session = jsch.getSession(userName, hostName, 22);
-        // only set the password if it's not empty
-        if (null != password && !password.isEmpty()) {
-            session.setUserInfo(new HardcodedUserInfo(password));
-        }
-        Properties config = new Properties();
-        config.setProperty("StrictHostKeyChecking", "no");
-        config.setProperty("UserKnownHostsFile", "/dev/null");
-        // use the identity file when the password is empty
-        if (null == password || password.isEmpty()) {
-            jsch.addIdentity(identityFile);
-        }
-        session.setConfig(config);
-        session.connect();
-        Assert.assertTrue(session.isConnected(), "The session was not connected correctly!");
-
-        List<String> data = new ArrayList<>();
-
-        ChannelExec channel = (ChannelExec) session.openChannel("exec");
-        channel.setPty(true);
-        String runCmd;
-        if (null == runAs || runAs.isEmpty()) {
-            runCmd = "sudo -S -p '' " + command;
-        } else {
-            runCmd = String.format("sudo su - %s -c '%s'", runAs, command);
-        }
-        if (userName.equals(runAs)) {
-            runCmd = command;
-        }
-        LOGGER.info("host_name: " + hostName + " user_name: " + userName
-            + " command: " + runCmd);
-        channel.setCommand(runCmd);
-        InputStream in = channel.getInputStream();
-        OutputStream out = channel.getOutputStream();
-        channel.setErrStream(System.err);
-        channel.connect();
-        TimeUtil.sleepSeconds(20);
-        // only send the password if it's not empty
-        if (null != password && !password.isEmpty()) {
-            out.write((password + "\n").getBytes());
-            out.flush();
-        }
-
-        //save console output to data
-        BufferedReader r = new BufferedReader(new InputStreamReader(in));
-        String line;
-        while (true) {
-            while ((line = r.readLine()) != null) {
-                LOGGER.debug(line);
-                data.add(line);
-            }
-            if (channel.isClosed()) {
-                break;
-            }
-        }
-
-        byte[] tmp = new byte[1024];
-        while (true) {
-            while (in.available() > 0) {
-                int i = in.read(tmp, 0, 1024);
-                if (i < 0) {
-                    break;
-                }
-                LOGGER.info(new String(tmp, 0, i));
-            }
-            if (channel.isClosed()) {
-                LOGGER.info("exit-status: " + channel.getExitStatus());
-                break;
-            }
-            TimeUtil.sleepSeconds(1);
-        }
-
-        IOUtils.closeQuietly(r);
-        IOUtils.closeQuietly(in);
-        channel.disconnect();
-        session.disconnect();
-        IOUtils.closeQuietly(out);
-        return data;
-    }
-
-    public static ExecResult executeCommand(String command) {
-        return executeCommand(CommandLine.parse(command));
-    }
-
-    public static ExecResult executeCommand(CommandLine commandLine) {
-        LOGGER.info("Command to be executed: " + commandLine);
-        DefaultExecutor executor = new DefaultExecutor();
-        executor.setWatchdog(new ExecuteWatchdog(5 * 1000)); //timeout of 5 seconds
-        final ByteArrayOutputStream outStream = new ByteArrayOutputStream();
-        final ByteArrayOutputStream errStream = new ByteArrayOutputStream();
-        executor.setStreamHandler(new PumpStreamHandler(outStream, errStream));
-        int exitVal = 1;
-        String exception = "";
-        try {
-            exitVal = executor.execute(commandLine);
-        } catch (IOException e) {
-            LOGGER.warn("Caught exception: " + e);
-            exception = e.toString();
-        }
-        final String output = outStream.toString();
-        String errors = errStream.toString();
-        errors = errors.isEmpty() ? exception : errors;
-
-        LOGGER.info("exitVal: " + exitVal);
-        LOGGER.info("output: " + output);
-        LOGGER.info("errors: " + errors);
-        return new ExecResult(commandLine, exitVal, output.trim(), errors.trim());
-    }
-
-    public static int executeCommandGetExitCode(String command) {
-        return executeCommand(command).getExitVal();
-    }
-
-    public static String executeCommandGetOutput(String command) {
-        return executeCommand(command).getOutput();
-    }
-
-    private  static final class HardcodedUserInfo implements UserInfo {
-
-        private final String password;
-
-        private HardcodedUserInfo(String password) {
-            this.password = password;
-        }
-
-        public String getPassphrase() {
-            return null;
-        }
-
-        public String getPassword() {
-            return password;
-        }
-
-        public boolean promptPassword(String s) {
-            return true;
-        }
-
-        public boolean promptPassphrase(String s) {
-            return true;
-        }
-
-        public boolean promptYesNo(String s) {
-            return true;
-        }
-
-        public void showMessage(String s) {
-            LOGGER.info("message = " + s);
-        }
-    }
-
-}
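
For reference, the executeCommand() helper deleted above is a thin wrapper over Apache
commons-exec. A minimal standalone sketch of that pattern, assuming the same commons-exec
dependency; the command string and class name below are illustrative only:

    import org.apache.commons.exec.CommandLine;
    import org.apache.commons.exec.DefaultExecutor;
    import org.apache.commons.exec.ExecuteWatchdog;
    import org.apache.commons.exec.PumpStreamHandler;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public final class ExecSketch {
        public static void main(String[] args) throws IOException {
            CommandLine cmd = CommandLine.parse("echo hello");
            DefaultExecutor executor = new DefaultExecutor();
            executor.setWatchdog(new ExecuteWatchdog(5 * 1000));   // kill the process after 5 seconds
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            ByteArrayOutputStream err = new ByteArrayOutputStream();
            executor.setStreamHandler(new PumpStreamHandler(out, err));
            int exitVal = executor.execute(cmd);                   // throws ExecuteException on a non-zero exit
            System.out.println("exitVal=" + exitVal + " output=" + out.toString().trim());
        }
    }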

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/FileUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/FileUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/FileUtil.java
deleted file mode 100644
index a969f1f..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/FileUtil.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.log4j.Logger;
-
-import java.io.File;
-import java.io.IOException;
-/**
- * Util class for local files.
- */
-public final class FileUtil {
-    private static final Logger LOGGER = Logger.getLogger(FileUtil.class);
-    private FileUtil() {
-    }
-
-    /**
-     * Writes an entity to a file and returns the file's absolute path.
-     * @param entity entity definition to be written
-     * @return absolute path of the file
-     * @throws IOException
-     */
-    public static String writeEntityToFile(String entity) throws IOException {
-        final String entityName = Util.readEntityName(entity);
-        final File entityFile = new File(entityName + ".xml");
-        LOGGER.info("attempting to write: " + entityName + " at location "
-            + entityFile.getAbsolutePath());
-        FileUtils.write(entityFile, entity);
-        return entityFile.getAbsolutePath();
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Generator.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Generator.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Generator.java
deleted file mode 100644
index 5842ced..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Generator.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-/** Generator class for generating predictable names and paths. */
-public final class Generator {
-    private final String prefix;
-    private final String postfix;
-    private final String formatString;
-    private int count;
-
-
-    private Generator(String prefix, String postfix, String formatString) {
-        this.prefix = prefix;
-        this.postfix = postfix;
-        this.count = 0;
-        this.formatString = formatString;
-    }
-
-    public String generate() {
-        count++;
-        return String.format(formatString, prefix, count, postfix);
-    }
-
-    public static Generator getNameGenerator(String prefix, String postfix) {
-        return new Generator(prefix, postfix, "%s%03d-%s");
-    }
-
-    public static Generator getHadoopPathGenerator(String prefix, String postfix) {
-        return new Generator(prefix, postfix, "%s_%03d%s");
-    }
-}
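
A short usage sketch of the generator above; the prefix and postfix values are made up, and
the outputs shown in the comments follow directly from the two format strings:

    public final class GeneratorSketch {
        public static void main(String[] args) {
            Generator names = Generator.getNameGenerator("feed", "cluster1");
            System.out.println(names.generate());   // feed001-cluster1
            System.out.println(names.generate());   // feed002-cluster1

            Generator paths = Generator.getHadoopPathGenerator("/tmp/data", "/in");
            System.out.println(paths.generate());   // /tmp/data_001/in
        }
    }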


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/BaseRequest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/BaseRequest.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/BaseRequest.java
deleted file mode 100644
index 60eeabd..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/BaseRequest.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.request;
-
-import org.apache.commons.net.util.TrustManagerUtils;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.falcon.security.FalconAuthorizationToken;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-import org.apache.http.Header;
-import org.apache.http.HttpEntityEnclosingRequest;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpRequest;
-import org.apache.http.HttpResponse;
-import org.apache.http.HttpStatus;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.conn.scheme.Scheme;
-import org.apache.http.conn.scheme.SchemeRegistry;
-import org.apache.http.conn.ssl.AllowAllHostnameVerifier;
-import org.apache.http.conn.ssl.SSLSocketFactory;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.client.DefaultHttpClient;
-import org.apache.http.impl.conn.BasicClientConnectionManager;
-import org.apache.http.message.BasicHeader;
-import org.apache.http.util.EntityUtils;
-import org.apache.log4j.Logger;
-
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.TrustManager;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-import java.util.List;
-
-/** Class for making rest requests. */
-public class BaseRequest {
-
-    private static final boolean AUTHENTICATE = setAuthenticate();
-    private static final Logger LOGGER = Logger.getLogger(BaseRequest.class);
-
-    private String method;
-    private String url;
-    private List<Header> headers;
-    private String requestData;
-    private String user;
-    private URI uri;
-    private HttpHost target;
-    private static final SSLSocketFactory SSL_SOCKET_FACTORY;
-
-    static {
-        try {
-            SSLContext ssl = getSslContext();
-            SSL_SOCKET_FACTORY = new SSLSocketFactory(ssl, new AllowAllHostnameVerifier());
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public static SSLContext getSslContext() throws Exception {
-        SSLContext sslContext = SSLContext.getInstance("SSL");
-        sslContext.init(
-                null,
-                new TrustManager[]{TrustManagerUtils.getValidateServerCertificateTrustManager()},
-                new SecureRandom());
-        return sslContext;
-    }
-
-    public BaseRequest(String url, String method, String user) throws URISyntaxException {
-        this(url, method, user, null);
-    }
-
-    public BaseRequest(String url, String method, String user, String data)
-        throws URISyntaxException {
-        this.method = method;
-        this.url = url;
-        this.requestData = null;
-        this.user = (null == user) ? MerlinConstants.CURRENT_USER_NAME : user;
-        this.uri = new URI(url);
-        target = new HttpHost(uri.getHost(), uri.getPort(), uri.getScheme());
-        this.headers = new ArrayList<>();
-        this.requestData = data;
-    }
-
-    private static boolean setAuthenticate() {
-        String value = Config.getProperty("isAuthenticationSet");
-        value = (null == value) ? "true" : value;
-        return !value.equalsIgnoreCase("false");
-    }
-
-    public void addHeader(String name, String value) {
-        headers.add(new BasicHeader(name, value));
-    }
-
-    public HttpResponse run() throws URISyntaxException, IOException, AuthenticationException,
-            InterruptedException {
-        URIBuilder uriBuilder = new URIBuilder(this.url);
-
-        /*Falcon now reads a user.name parameter in the request;
-        by default we add it to every request.*/
-        uriBuilder.addParameter(PseudoAuthenticator.USER_NAME, this.user);
-        uri = uriBuilder.build();
-        this.url = uri.toString();
-        // process the get
-        if (this.method.equalsIgnoreCase("get")) {
-            return execute(new HttpGet(this.url));
-        } else if (this.method.equalsIgnoreCase("delete")) {
-            return execute(new HttpDelete(this.url));
-        }
-
-        HttpEntityEnclosingRequest request = null;
-        if (this.method.equalsIgnoreCase("post")) {
-            request = new HttpPost(new URI(this.url));
-        } else if (this.method.equalsIgnoreCase("put")) {
-            request = new HttpPut(new URI(this.url));
-        } else {
-            throw new IOException("Unknown method: " + method);
-        }
-        if (this.requestData != null) {
-            request.setEntity(new StringEntity(requestData));
-        }
-        return execute(request);
-    }
-
-    private HttpResponse execute(HttpRequest request)
-        throws IOException, AuthenticationException, InterruptedException {
-        // add headers to the request
-        if (null != headers && headers.size() > 0) {
-            for (Header header : headers) {
-                request.addHeader(header);
-            }
-        }
-        /*Get the token and add it to the header;
-        works in both secure and non-secure modes.*/
-        AuthenticatedURL.Token token;
-        if (AUTHENTICATE) {
-            token = FalconAuthorizationToken.getToken(user, uri.getScheme(),
-                    uri.getHost(), uri.getPort());
-            request.addHeader(RequestKeys.COOKIE, RequestKeys.AUTH_COOKIE_EQ + token);
-        }
-
-        HttpClient client;
-        if (uri.toString().startsWith("https")) {
-            SchemeRegistry schemeRegistry = new SchemeRegistry();
-            schemeRegistry.register(new Scheme("https", uri.getPort(), SSL_SOCKET_FACTORY));
-            BasicClientConnectionManager cm = new BasicClientConnectionManager(schemeRegistry);
-            client = new DefaultHttpClient(cm);
-        } else {
-            client = new DefaultHttpClient();
-        }
-        LOGGER.info("Request Url: " + request.getRequestLine().getUri());
-        LOGGER.info("Request Method: " + request.getRequestLine().getMethod());
-
-        for (Header header : request.getAllHeaders()) {
-            LOGGER.info(String.format("Request Header: Name=%s Value=%s", header.getName(),
-                header.getValue()));
-        }
-        HttpResponse response = client.execute(target, request);
-
-        /*In case the cookie has expired and we get a negotiate error back, generate the token again
-        and resend the request.*/
-        if ((response.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED)) {
-            Header[] wwwAuthHeaders = response.getHeaders(RequestKeys.WWW_AUTHENTICATE);
-            if (wwwAuthHeaders != null && wwwAuthHeaders.length != 0
-                && wwwAuthHeaders[0].getValue().trim().startsWith(RequestKeys.NEGOTIATE)) {
-                if (AUTHENTICATE) {
-                    token = FalconAuthorizationToken.getToken(user, uri.getScheme(),
-                        uri.getHost(), uri.getPort(), true);
-                    request.removeHeaders(RequestKeys.COOKIE);
-                    request.addHeader(RequestKeys.COOKIE, RequestKeys.AUTH_COOKIE_EQ + token);
-                }
-                LOGGER.info("Request Url: " + request.getRequestLine().getUri());
-                LOGGER.info("Request Method: " + request.getRequestLine().getMethod());
-                for (Header header : request.getAllHeaders()) {
-                    LOGGER.info(String.format("Request Header: Name=%s Value=%s", header.getName(),
-                        header.getValue()));
-                }
-                // Must call this to release the connection
-                EntityUtils.consume(response.getEntity());
-                response = client.execute(target, request);
-            }
-        }
-        LOGGER.info("Response Status: " + response.getStatusLine());
-        for (Header header : response.getAllHeaders()) {
-            LOGGER.info(String.format("Response Header: Name=%s Value=%s", header.getName(),
-                header.getValue()));
-        }
-        return response;
-    }
-}
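
The key detail in run() above is that a user.name query parameter is appended to every
request before dispatch. A minimal sketch of that step, assuming the same HttpClient 4.x
URIBuilder; the host, port and user below are illustrative:

    import org.apache.http.client.utils.URIBuilder;

    import java.net.URI;
    import java.net.URISyntaxException;

    public final class UserNameParamSketch {
        public static void main(String[] args) throws URISyntaxException {
            URI uri = new URIBuilder("https://falcon.example.com:15443/api/entities/list/feed")
                    .addParameter("user.name", "test-user")   // PseudoAuthenticator.USER_NAME is "user.name"
                    .build();
            System.out.println(uri);   // ...list/feed?user.name=test-user
        }
    }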

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/RequestKeys.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/RequestKeys.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/RequestKeys.java
deleted file mode 100644
index 05d5fc6..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/request/RequestKeys.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.request;
-
-/** Class containing different request keys. */
-public final class RequestKeys {
-    private RequestKeys() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    public static final String CONTENT_TYPE_HEADER = "Content-Type";
-    public static final String XML_CONTENT_TYPE = "text/xml";
-    public static final String JSON_CONTENT_TYPE = "application/json";
-
-    public static final String AUTH_COOKIE = "hadoop.auth";
-    public static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
-    public static final String COOKIE = "Cookie";
-    public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
-    public static final String NEGOTIATE = "Negotiate";
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/security/FalconAuthorizationToken.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/security/FalconAuthorizationToken.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/security/FalconAuthorizationToken.java
deleted file mode 100644
index 7ab8f9a..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/security/FalconAuthorizationToken.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.falcon.regression.core.util.KerberosHelper;
-import org.apache.falcon.request.BaseRequest;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-import org.apache.log4j.Logger;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.SSLSession;
-import java.io.IOException;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import java.util.concurrent.ConcurrentHashMap;
-
-/** Class for obtaining authorization token. */
-public final class FalconAuthorizationToken {
-    private static final String AUTH_URL = "api/options";
-    private static final KerberosAuthenticator AUTHENTICATOR = new KerberosAuthenticator();
-    private static final FalconAuthorizationToken INSTANCE = new FalconAuthorizationToken();
-    private static final Logger LOGGER = Logger.getLogger(FalconAuthorizationToken.class);
-
-    // Use a hashmap so that we can cache the tokens.
-    private final ConcurrentHashMap<String, AuthenticatedURL.Token> tokens =
-        new ConcurrentHashMap<>();
-
-    private FalconAuthorizationToken() {
-    }
-
-    public static final HostnameVerifier ALL_TRUSTING_HOSTNAME_VERIFIER = new HostnameVerifier() {
-        @Override
-        public boolean verify(String hostname, SSLSession sslSession) {
-            return true;
-        }
-    };
-
-    private static void authenticate(String user, String protocol, String host, int port)
-        throws IOException, AuthenticationException, InterruptedException {
-        final URL url = new URL(String.format("%s://%s:%d/%s", protocol, host, port,
-            AUTH_URL + "?" + PseudoAuthenticator.USER_NAME + "=" + user));
-        LOGGER.info("Authorize using url: " + url.toString());
-
-        final AuthenticatedURL.Token currentToken = new AuthenticatedURL.Token();
-
-        /*Using KerberosAuthenticator, which falls back to PseudoAuthenticator,
-        instead of passing the authentication type from the command line - bad factory.*/
-        try {
-            HttpsURLConnection.setDefaultSSLSocketFactory(BaseRequest.getSslContext()
-                    .getSocketFactory());
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-        HttpsURLConnection.setDefaultHostnameVerifier(ALL_TRUSTING_HOSTNAME_VERIFIER);
-        UserGroupInformation callerUGI = KerberosHelper.getUGI(user);
-        callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override
-            public Void run() throws Exception {
-                new AuthenticatedURL(AUTHENTICATOR).openConnection(url, currentToken);
-                return null;
-            }
-        });
-        String key = getKey(user, protocol, host, port);
-
-        // cache the token under the user/protocol/host/port key.
-        LOGGER.info("Authorization Token: " + currentToken.toString());
-        INSTANCE.tokens.put(key, currentToken);
-    }
-
-    public static AuthenticatedURL.Token getToken(String user, String protocol, String host,
-                                                  int port, boolean overWrite)
-        throws IOException, AuthenticationException, InterruptedException {
-        String key = getKey(user, protocol, host, port);
-
-        /*If the token is not found in the cache, or if we are asked to overwrite it,
-        go ahead and authenticate.*/
-        if (!INSTANCE.tokens.containsKey(key) || overWrite) {
-            authenticate(user, protocol, host, port);
-        }
-        return INSTANCE.tokens.get(key);
-    }
-
-    public static AuthenticatedURL.Token getToken(String user, String protocol, String host,
-                                                  int port)
-        throws IOException, AuthenticationException, InterruptedException {
-        return getToken(user, protocol, host, port, false);
-    }
-
-    /*The SPNEGO token is unique to the user and the URI it is requested for.
-    Hence the cache key is the combination of user, protocol, host and port.*/
-    private static String getKey(String user, String protocol, String host, int port) {
-        return String.format("%s-%s-%s-%d", user, protocol, host, port);
-    }
-}
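
The class above is essentially a cache of SPNEGO tokens keyed by (user, protocol, host, port).
A dependency-free sketch of that caching pattern; fetchToken() is a hypothetical stand-in for
the real authentication round trip, and computeIfAbsent is a Java 8 alternative to the explicit
containsKey check used above:

    import java.util.concurrent.ConcurrentHashMap;

    public final class TokenCacheSketch {
        private static final ConcurrentHashMap<String, String> TOKENS = new ConcurrentHashMap<>();

        // Hypothetical stand-in for the real authentication round trip.
        private static String fetchToken(String key) {
            return "token-for-" + key;
        }

        static String getToken(String user, String protocol, String host, int port, boolean overwrite) {
            String key = String.format("%s-%s-%s-%d", user, protocol, host, port);
            if (overwrite) {
                TOKENS.put(key, fetchToken(key));
            }
            return TOKENS.computeIfAbsent(key, TokenCacheSketch::fetchToken);
        }

        public static void main(String[] args) {
            System.out.println(getToken("test-user", "https", "falcon.example.com", 15443, false));
        }
    }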

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/pom.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/pom.xml b/falcon-regression/merlin/pom.xml
deleted file mode 100644
index e624925..0000000
--- a/falcon-regression/merlin/pom.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.falcon.regression</groupId>
-        <artifactId>falcon-regression</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-merlin</artifactId>
-    <description>Merlin - Regression test suite for Apache Falcon</description>
-    <name>Apache Falcon Regression Suite</name>
-    <packaging>jar</packaging>
-    <profiles>
-        <profile>
-            <id>hadoop-2</id>
-            <dependencies>
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-common</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-client</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive.hcatalog</groupId>
-                    <artifactId>hive-webhcat-java-client</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-hdfs</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-azure</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-distcp</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive</groupId>
-                    <artifactId>hive-jdbc</artifactId>
-                </dependency>
-                <dependency>
-                    <groupId>org.apache.hive</groupId>
-                    <artifactId>hive-metastore</artifactId>
-                </dependency>
-
-
-            </dependencies>
-        </profile>
-    </profiles>
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.falcon.regression</groupId>
-            <artifactId>falcon-merlin-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-        </dependency>
-        <!-- Added so that at run time log4j is available to capture the logs -->
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseTestClass.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseTestClass.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseTestClass.java
deleted file mode 100644
index f32da2d..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseTestClass.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.testHelper;
-
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.CleanupUtil;
-import org.apache.falcon.regression.core.util.Config;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.OozieClient;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Base class for test classes.
- */
-public class BaseTestClass {
-    private static String[] serverNames;
-    private static final Logger LOGGER = Logger.getLogger(BaseTestClass.class);
-
-    static {
-        prepareProperties();
-    }
-
-    protected ColoHelper prism;
-    protected List<ColoHelper> servers;
-    protected List<FileSystem> serverFS;
-    protected List<OozieClient> serverOC;
-    private String baseHDFSDir = "/tmp/falcon-regression";
-    public static final String PRISM_PREFIX = "prism";
-    protected Bundle[] bundles;
-    public static final String MINUTE_DATE_PATTERN = "/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}";
-
-
-    public BaseTestClass() {
-        // loginFromKeytab as the current user
-        prism = new ColoHelper(PRISM_PREFIX);
-        servers = getServers();
-        serverFS = new ArrayList<>();
-        serverOC = new ArrayList<>();
-        try {
-            for (ColoHelper server : servers) {
-                serverFS.add(server.getClusterHelper().getHadoopFS());
-                serverOC.add(server.getClusterHelper().getOozieClient());
-            }
-            cleanTestsDirs();
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-
-        bundles = new Bundle[serverNames.length];
-        removeTestClassEntities();
-    }
-
-    protected final String cleanAndGetTestDir() {
-        String dir = baseHDFSDir + '/' + this.getClass().getSimpleName();
-        try {
-            HadoopUtil.recreateDir(serverFS, dir);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-        return dir;
-    }
-
-    private static void prepareProperties() {
-
-        serverNames = Config.getStringArray("servers");
-        for (int i = 0; i < serverNames.length; i++) {
-            serverNames[i] = serverNames[i].trim();
-        }
-    }
-
-    private List<ColoHelper> getServers() {
-        ArrayList<ColoHelper> returnList = new ArrayList<>();
-        for (String serverName : serverNames) {
-            returnList.add(new ColoHelper(serverName));
-        }
-        return returnList;
-    }
-
-    public void uploadDirToClusters(final String dstHdfsDir, final String localLocation)
-        throws IOException {
-        LOGGER.info(String.format("Uploading local dir: %s to all the clusters at: %s",
-            localLocation, dstHdfsDir));
-        for (FileSystem fs : serverFS) {
-            HadoopUtil.uploadDir(fs, dstHdfsDir, localLocation);
-        }
-    }
-
-    /**
-     * Removes test class entities which belong to the default test user.
-     */
-    public final void removeTestClassEntities() {
-        removeTestClassEntities(new String[]{null});
-    }
-
-    /**
-     * Removes all entities whose names start with the prefix derived from the test class name. As entities can
-     * belong to different users, those users need to be listed as parameters. Note that null denotes the default test user.
-     * @param users users whose entities should be removed.
-     */
-    public final void removeTestClassEntities(String...users) {
-        for (String user : users) {
-            for (Bundle bundle : this.bundles) {
-                if (bundle != null) {
-                    bundle.deleteBundle(prism);
-                }
-            }
-            String className = this.getClass().getSimpleName();
-            CleanupUtil.cleanEntitiesWithPrefix(prism, className, user);
-            String deprecatedPrefix = 'A' + Integer.toHexString(className.hashCode());
-            CleanupUtil.cleanEntitiesWithPrefix(prism, deprecatedPrefix, user);
-        }
-    }
-
-
-    public final void cleanTestsDirs() throws IOException {
-        if (MerlinConstants.CLEAN_TESTS_DIR) {
-            for (FileSystem fs : serverFS) {
-                HadoopUtil.deleteDirIfExists(baseHDFSDir, fs);
-            }
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseUITestClass.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseUITestClass.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseUITestClass.java
deleted file mode 100644
index 09b8265..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/testHelper/BaseUITestClass.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.testHelper;
-
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.util.Config;
-import org.openqa.selenium.Dimension;
-import org.openqa.selenium.Point;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.firefox.FirefoxDriver;
-import org.openqa.selenium.firefox.FirefoxProfile;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * Base class for UI test classes.
- */
-public class BaseUITestClass extends BaseTestClass {
-
-    private static WebDriver driver;
-
-    public static WebDriver getDriver() {
-        return driver;
-    }
-
-    protected static void openBrowser() {
-
-        FirefoxProfile profile = new FirefoxProfile();
-        profile.setPreference("network.negotiate-auth.trusted-uris", MerlinConstants.PRISM_URL);
-
-        driver = new FirefoxDriver(profile);
-        driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS);
-
-        int width = Config.getInt("browser.window.width", 0);
-        int height = Config.getInt("browser.window.height", 0);
-
-        if (width * height == 0) {
-            driver.manage().window().maximize();
-        } else {
-            driver.manage().window().setPosition(new Point(0, 0));
-            driver.manage().window().setSize(new Dimension(width, height));
-        }
-
-    }
-
-
-    public static void closeBrowser() {
-        if (driver != null) {
-            driver.close();
-            driver.quit();
-            driver = null;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntitiesPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntitiesPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntitiesPage.java
deleted file mode 100644
index b175470..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntitiesPage.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.pages;
-
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.TimeoutException;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.ui.WebDriverWait;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Page with list of entities.
- */
-public class EntitiesPage extends Page {
-
-    protected static final String ENTITIES_TABLE = "//table[@id='entity-list']/tbody/tr";
-
-    private static final Logger LOGGER = Logger.getLogger(EntitiesPage.class);
-    private static final String ACTIVE_NXT_BTN = "//ul/li[not(@class)]/a[contains(text(),'»')]";
-    private static final String PAGE_NUMBER = "//ul[@class='pagination']/li[@class='active']/a";
-
-    public EntitiesPage(WebDriver driver, ColoHelper helper, EntityType type) {
-        super(driver, helper);
-        url += "/index.html?type=" + type.toString().toLowerCase();
-
-        expectedElement = ENTITIES_TABLE;
-        notFoundMsg = String.format("No entities on %sS page", type);
-    }
-
-    /**
-     * Returns status of defined entity.
-     * @param entityName name of entity
-     * @return status of defined entity
-     */
-    public EntityStatus getEntityStatus(String entityName) {
-        navigateTo();
-        while (true) {
-            String status = getEntitiesOnPage().get(entityName);
-            if (status != null) {
-                return EntityStatus.valueOf(status);
-            }
-            if (nextPagePresent()) {
-                goNextPage();
-            } else {
-                break;
-            }
-        }
-        return null;
-    }
-
-    /**
-     * Loads next page.
-     */
-    private void goNextPage() {
-        LOGGER.info("Navigating to next page...");
-        WebElement nextButton = driver.findElement(By.xpath(ACTIVE_NXT_BTN));
-        nextButton.click();
-        waitForElement(expectedElement, DEFAULT_TIMEOUT, "Next page didn't load");
-    }
-
-
-    /**
-     * Checks if next page is present.
-     * @return true if next page is present
-     */
-
-    private boolean nextPagePresent() {
-        LOGGER.info("Checking if next page is present...");
-        try {
-            new WebDriverWait(driver, DEFAULT_TIMEOUT).until(new Condition(ACTIVE_NXT_BTN, true));
-            return true;
-        } catch (TimeoutException e) {
-            return false;
-        }
-    }
-
-    /**
-     * Returns page number.
-     * @return page number
-     */
-    public int getPageNumber() {
-        String number = driver.findElement(By.xpath(PAGE_NUMBER)).getText();
-        return Integer.parseInt(number);
-    }
-
-    private Map<String, String> getEntitiesOnPage() {
-        LOGGER.info("Reading all entities on page...");
-        List<WebElement> lines = driver.findElements(By.xpath(ENTITIES_TABLE));
-        Map<String, String> entities = new HashMap<>();
-        for (WebElement line : lines) {
-            WebElement name = line.findElement(By.xpath("./td[1]/a"));
-            WebElement status = line.findElement(By.xpath("./td[2]"));
-            entities.put(name.getText(), status.getText());
-        }
-        return entities;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntityPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntityPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntityPage.java
deleted file mode 100644
index 6b6d17f..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/EntityPage.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.pages;
-
-
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import java.io.StringReader;
-
-/**
- * Page of entity (feed or process or cluster).
- * @param <T> type of entity
- */
-public class EntityPage<T> extends Page {
-
-    private Class<T> type;
-
-    protected EntityPage(WebDriver driver, ColoHelper helper, EntityType type, Class<T> entity, String entityName) {
-        super(driver, helper);
-        url += String.format("/entity.html?type=%s&id=%s", type.toString().toLowerCase(), entityName);
-        this.type = entity;
-        expectedElement = "//textarea[@id='entity-def-textarea' and contains(text(), 'xml')]";
-        notFoundMsg = String.format(" %s '%s' not found!", type, entityName);
-    }
-
-    /**
-     * Returns page of defined CLUSTER entity.
-     * @param entityName name of defined entity
-     * @return page of defined CLUSTER entity
-     */
-    public static EntityPage<Cluster> getClusterPage(WebDriver driver, ColoHelper helper, String entityName) {
-        return new EntityPage<>(driver, helper, EntityType.CLUSTER, Cluster.class, entityName);
-    }
-
-    /**
-     * Returns page of defined FEED entity.
-     * @param entityName name of defined entity
-     * @return page of defined FEED entity
-     */
-    public static EntityPage<Feed> getFeedPage(WebDriver driver, ColoHelper helper, String entityName) {
-        return new EntityPage<>(driver, helper, EntityType.FEED, Feed.class, entityName);
-    }
-
-    /**
-     * Returns entity object.
-     * @return entity object
-     * @throws JAXBException
-     */
-    @SuppressWarnings("unchecked")
-    public T getEntity() throws JAXBException {
-        String entity = driver.findElement(By.id("entity-def-textarea")).getText();
-        JAXBContext jc = JAXBContext.newInstance(type);
-        Unmarshaller u = jc.createUnmarshaller();
-        return (T) u.unmarshal(new StringReader(entity));
-    }
-
-}
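
getEntity() above is a plain JAXB unmarshal of the XML shown in the entity definition textarea.
A self-contained sketch of that round trip; SimpleFeed is a made-up type standing in for
Falcon's generated Feed, Process and Cluster classes:

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.JAXBException;
    import javax.xml.bind.Unmarshaller;
    import javax.xml.bind.annotation.XmlRootElement;

    import java.io.StringReader;

    public final class JaxbSketch {

        // Made-up type; the real page binds to Falcon's generated entity classes.
        @XmlRootElement(name = "feed")
        static class SimpleFeed {
            public String name;
        }

        public static void main(String[] args) throws JAXBException {
            String xml = "<feed><name>demo-feed</name></feed>";
            Unmarshaller u = JAXBContext.newInstance(SimpleFeed.class).createUnmarshaller();
            SimpleFeed feed = (SimpleFeed) u.unmarshal(new StringReader(xml));
            System.out.println(feed.name);   // demo-feed
        }
    }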

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/Page.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/Page.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/Page.java
deleted file mode 100644
index f176949..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/Page.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.pages;
-
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.SearchContext;
-import org.openqa.selenium.TimeoutException;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.ui.ExpectedCondition;
-import org.openqa.selenium.support.ui.WebDriverWait;
-
-
-/**
- * Abstract page of Falcon UI.
- */
-public abstract class Page {
-    protected static final int DEFAULT_TIMEOUT = 10;
-    protected String url;
-    protected WebDriver driver;
-
-    protected String expectedElement;
-    protected String notFoundMsg;
-
-    private static final Logger LOGGER = Logger.getLogger(Page.class);
-
-    Page(WebDriver driver, ColoHelper helper) {
-        this.driver = driver;
-        url = helper.getClusterHelper().getHostname();
-    }
-
-    public Page(WebDriver driver) {
-        this.driver = driver;
-    }
-    /**
-     * Go to page in browser.
-     */
-    public void navigateTo() {
-        LOGGER.info("Navigating to " + url);
-        driver.get(url);
-        waitForElement(expectedElement, DEFAULT_TIMEOUT, notFoundMsg);
-    }
-
-    /**
-     * Refresh page.
-     */
-    public void refresh() {
-        LOGGER.info("Refreshing page " + url);
-        driver.navigate().refresh();
-    }
-
-    /**
-     * Wait for some WebElement defined by xpath. Throws TimeoutException if element is not visible after defined time.
-     * @param webElement find xpath inside this WebElement
-     * @param xpath xpath of expected WebElement
-     * @param timeoutSeconds how many seconds we should wait for element
-     * @param errMessage message for TimeoutException
-     */
-    public void waitForElement(WebElement webElement, final String xpath,
-                               final long timeoutSeconds, String errMessage) {
-        waitForElementAction(webElement, xpath, timeoutSeconds, errMessage, true);
-    }
-
-    /**
-     * Wait for some WebElement defined by xpath. Throws TimeoutException if element is not visible after defined time.
-     * @param xpath xpath of expected WebElement
-     * @param timeoutSeconds how many seconds we should wait for element
-     * @param errMessage message for TimeoutException
-     */
-    public void waitForElement(final String xpath, final long timeoutSeconds, String errMessage) {
-        waitForElementAction(null, xpath, timeoutSeconds, errMessage, true);
-    }
-
-    /**
-     * Wait until WebElement disappears.
-     * @param xpath xpath of expected WebElement
-     * @param timeoutSeconds how many seconds we should wait for disappearing
-     * @param errMessage message for TimeoutException
-     */
-    public void waitForDisappear(final String xpath, final long timeoutSeconds, String errMessage) {
-        waitForElementAction(null, xpath, timeoutSeconds, errMessage, false);
-    }
-
-    /**
-     * Wait until WebElement becomes visible.
-     * @param xpath xpath of expected WebElement
-     * @param timeoutSeconds how many seconds we should wait for visibility
-     * @param errMessage message for TimeoutException
-     */
-    public void waitForDisplayed(String xpath, long timeoutSeconds, String errMessage) {
-        waitForElement(xpath, timeoutSeconds, errMessage);
-        WebElement element = driver.findElement(By.xpath(xpath));
-        for (int i = 0; i < timeoutSeconds * 10; i++) {
-            if (element.isDisplayed()) {
-                return;
-            }
-            TimeUtil.sleepSeconds(0.1);
-        }
-        throw new TimeoutException(errMessage);
-    }
-
-    private void waitForElementAction(WebElement webElement, String xpath, long timeoutSeconds,
-                                      String errMessage, boolean expected) {
-        try {
-            new WebDriverWait(driver, timeoutSeconds)
-                .until(new Condition(webElement, xpath, expected));
-        } catch (TimeoutException e) {
-            TimeoutException ex = new TimeoutException(errMessage);
-            ex.initCause(e);
-            throw ex;
-        }
-    }
-
-    /**
-     * Status of entity that can be shown on Falcon UI.
-     */
-    public enum EntityStatus {
-        UNKNOWN, SUBMITTED, RUNNING, SUSPENDED
-    }
-
-    static class Condition implements ExpectedCondition<Boolean> {
-
-        private final boolean isPresent;
-        private String xpath;
-        private WebElement webElement;
-
-        public Condition(String xpath, boolean isPresent) {
-            this.xpath = xpath;
-            this.isPresent = isPresent;
-        }
-
-        public Condition(WebElement webElement, String xpath, boolean isPresent) {
-            this.webElement = webElement;
-            this.xpath = xpath;
-            this.isPresent = isPresent;
-        }
-
-        @Override
-        public Boolean apply(WebDriver webDriver) {
-            SearchContext search = (webElement == null) ? webDriver : webElement;
-            return search.findElements(By.xpath(xpath)).isEmpty() != isPresent;
-        }
-    }
-}
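
The Condition class above is a hand-rolled ExpectedCondition driven by WebDriverWait. A minimal
sketch of the same wait-for-xpath idiom, assuming the Selenium 2.x API this module depended on;
the method name and messages are illustrative:

    import org.openqa.selenium.By;
    import org.openqa.selenium.TimeoutException;
    import org.openqa.selenium.WebDriver;
    import org.openqa.selenium.support.ui.ExpectedCondition;
    import org.openqa.selenium.support.ui.WebDriverWait;

    public final class WaitSketch {

        /** Blocks until at least one element matches the xpath, or fails with errMessage after timeoutSeconds. */
        static void waitForXpath(WebDriver driver, final String xpath, long timeoutSeconds, String errMessage) {
            try {
                new WebDriverWait(driver, timeoutSeconds).until(new ExpectedCondition<Boolean>() {
                    @Override
                    public Boolean apply(WebDriver d) {
                        // A non-empty result list means the element has appeared.
                        return !d.findElements(By.xpath(xpath)).isEmpty();
                    }
                });
            } catch (TimeoutException e) {
                throw new TimeoutException(errMessage, e);
            }
        }
    }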

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/ProcessPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/ProcessPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/ProcessPage.java
deleted file mode 100644
index 36fb1fe..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/pages/ProcessPage.java
+++ /dev/null
@@ -1,345 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.pages;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.interactions.Actions;
-import org.openqa.selenium.Point;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-/**
- * Page of a process entity.
- */
-public class ProcessPage extends EntityPage<Process> {
-
-    private static final Logger LOGGER = Logger.getLogger(ProcessPage.class);
-    private boolean isLineageOpened = false;
-
-    private static final String INSTANCES_PANEL = "//div[@id='panel-instance']//span";
-    private static final String INSTANCE_STATUS_TEMPLATE = INSTANCES_PANEL + "[contains(..,'%s')]";
-    private static final String LINEAGE_LINK_TEMPLATE =
-        "//a[@class='lineage-href' and @data-instance-name='%s']";
-
-    //Lineage information xpaths
-    private static final String CLOSE_LINEAGE_LINK_TEMPLATE =
-        "//body[@class='modal-open']//button[contains(., 'Close')]";
-    private static final String LINEAGE_MODAL = "//div[@id='lineage-modal']";
-    private static final String SVG_ELEMENT = "//*[name() = 'svg']/*[name()='g']/*[name()='g']";
-    private static final String VERTICES_BLOCKS = SVG_ELEMENT + "[not(@class='lineage-link')]";
-    private static final String VERTICES_TEXT = VERTICES_BLOCKS
-        + "//div[@class='lineage-node-text']";
-    private static final String EDGE = SVG_ELEMENT + "[@class='lineage-link']//*[name()='path']";
-    private static final String CIRCLE = "//*[name() = 'circle']";
-    private static final String VERTICES = VERTICES_BLOCKS + CIRCLE;
-    private static final String VERTEX_BLOCK_TEMPLATE = VERTICES_BLOCKS + "[contains(., '%s')]";
-    private static final String VERTEX_TEMPLATE = VERTEX_BLOCK_TEMPLATE + CIRCLE;
-
-    private static final String LINEAGE_INFO_PANEL_LIST = "//div[@id='lineage-info-panel']"
-        + "//div[@class='col-md-3']";
-
-    private static final String LINEAGE_TITLE = LINEAGE_MODAL + "//div[@class='modal-header']/h4";
-
-    private static final String LINEAGE_LEGENDS_BLOCK = LINEAGE_MODAL
-        + "//div[@class='modal-body']/div[ul[@class='lineage-legend']]";
-    private static final String LINEAGE_LEGENDS_TITLE = LINEAGE_LEGENDS_BLOCK + "/h4";
-    private static final String LINEAGE_LEGENDS_ELEMENTS = LINEAGE_LEGENDS_BLOCK + "/ul/li";
-
-    public ProcessPage(WebDriver driver, ColoHelper helper, String entityName) {
-        super(driver, helper, EntityType.PROCESS, Process.class, entityName);
-    }
-
-    /**
-     * @param nominalTime particular instance of process, defined by its start time
-     */
-    public void openLineage(String nominalTime) {
-        waitForElement(String.format(LINEAGE_LINK_TEMPLATE, nominalTime), DEFAULT_TIMEOUT,
-            "Lineage button didn't appear");
-        LOGGER.info("Working with instance: " + nominalTime);
-        WebElement lineage =
-            driver.findElement(By.xpath(String.format(LINEAGE_LINK_TEMPLATE, nominalTime)));
-        LOGGER.info("Opening lineage...");
-        lineage.click();
-        waitForElement(VERTICES, DEFAULT_TIMEOUT, "Circles not found");
-        waitForDisplayed(LINEAGE_TITLE, DEFAULT_TIMEOUT, "Lineage title not found");
-        isLineageOpened = true;
-    }
-
-    public void closeLineage() {
-        LOGGER.info("Closing lineage...");
-        if (isLineageOpened) {
-            WebElement close = driver.findElement(By.xpath(CLOSE_LINEAGE_LINK_TEMPLATE));
-            close.click();
-            isLineageOpened = false;
-            waitForDisappear(CLOSE_LINEAGE_LINK_TEMPLATE, DEFAULT_TIMEOUT,
-                "Lineage didn't disappear");
-        }
-    }
-
-    @Override
-    public void refresh() {
-        super.refresh();
-        isLineageOpened = false;
-    }
-
-    /**
-     * @return map of instance names to their nominal start times
-     */
-    public HashMap<String, List<String>> getAllVertices() {
-        LOGGER.info("Getting all vertices from lineage graph...");
-        HashMap<String, List<String>> map = null;
-        if (isLineageOpened) {
-            waitForElement(VERTICES_TEXT, DEFAULT_TIMEOUT,
-                "Vertices blocks with names not found");
-            List<WebElement> blocks = driver.findElements(By.xpath(VERTICES_TEXT));
-            LOGGER.info(blocks.size() + " elements found");
-            map = new HashMap<>();
-            for (WebElement block : blocks) {
-                waitForElement(block, ".[contains(.,'/')]", DEFAULT_TIMEOUT,
-                    "Expecting text to contain '/' :" + block.getText());
-                String text = block.getText();
-                LOGGER.info("Vertex: " + text);
-                String[] separate = text.split("/");
-                String name = separate[0];
-                String nominalTime = separate[1];
-                if (map.containsKey(name)) {
-                    map.get(name).add(nominalTime);
-                } else {
-                    List<String> instances = new ArrayList<>();
-                    instances.add(nominalTime);
-                    map.put(name, instances);
-                }
-            }
-        }
-        return map;
-    }
-
-    /**
-     * @return list of all vertices names
-     */
-    public List<String> getAllVerticesNames() {
-        LOGGER.info("Getting all vertices names from lineage graph...");
-        List<String> list = new ArrayList<>();
-        if (isLineageOpened) {
-            waitForElement(CLOSE_LINEAGE_LINK_TEMPLATE, DEFAULT_TIMEOUT,
-                "Close Lineage button not found");
-            waitForElement(VERTICES_BLOCKS, DEFAULT_TIMEOUT,
-                "Vertices not found");
-            List<WebElement> blocks = driver.findElements(By.xpath(VERTICES_BLOCKS));
-            LOGGER.info(blocks.size() + " elements found");
-            for (WebElement block : blocks) {
-                list.add(block.getText());
-            }
-        }
-        LOGGER.info("Vertices: " + list);
-        return list;
-    }
-
-    /**
-     * Vertex is defined by its entity name and the particular time of its creation.
-     */
-    public void clickOnVertex(String entityName, String nominalTime) {
-        LOGGER.info("Clicking on vertex " + entityName + '/' + nominalTime);
-        if (isLineageOpened) {
-            WebElement circle = driver.findElement(By.xpath(String.format(VERTEX_TEMPLATE,
-                entityName + '/' + nominalTime)));
-            Actions builder = new Actions(driver);
-            builder.click(circle).build().perform();
-            TimeUtil.sleepSeconds(0.5);
-        }
-    }
-
-    /**
-     * @return map of parameters from info panel and their values
-     */
-    public HashMap<String, String> getPanelInfo() {
-        LOGGER.info("Getting info panel values...");
-        HashMap<String, String> map = null;
-        if (isLineageOpened) {
-            //check if vertex was clicked
-            waitForElement(LINEAGE_INFO_PANEL_LIST, DEFAULT_TIMEOUT, "Info panel not found");
-            List<WebElement> infoBlocks = driver.findElements(By.xpath(LINEAGE_INFO_PANEL_LIST));
-            LOGGER.info(infoBlocks.size() + " values found");
-            map = new HashMap<>();
-            for (WebElement infoBlock : infoBlocks) {
-                String text = infoBlock.getText();
-                String[] values = text.split("\n");
-                map.put(values[0], values[1]);
-            }
-        }
-        LOGGER.info("Values: " + map);
-        return map;
-    }
-
-    /**
-     * @return map of legend element classes as keys and their names on the UI as values
-     */
-    public HashMap<String, String> getLegends() {
-        HashMap<String, String> map = null;
-        if (isLineageOpened) {
-            map = new HashMap<>();
-            List<WebElement> legends = driver.findElements(By.xpath(LINEAGE_LEGENDS_ELEMENTS));
-            for (WebElement legend : legends) {
-                String value = legend.getText();
-                String elementClass = legend.getAttribute("class");
-                map.put(elementClass, value);
-            }
-        }
-        return map;
-    }
-
-    /**
-     * @return the main title of Lineage UI
-     */
-    public String getLineageTitle() {
-        LOGGER.info("Getting Lineage title...");
-        if (isLineageOpened) {
-            return driver.findElement(By.xpath(LINEAGE_TITLE)).getText();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * @return the name of legends block
-     */
-    public String getLegendsTitle() {
-        LOGGER.info("Getting Legends title...");
-        if (isLineageOpened) {
-            return driver.findElement(By.xpath(LINEAGE_LEGENDS_TITLE)).getText();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * @return list of edges present on the UI. Each edge is represented as two 2D points - the beginning
-     * and the end of the edge.
-     */
-    public List<Point[]> getEdgesFromGraph() {
-        List<Point[]> pathsEndpoints = null;
-        LOGGER.info("Getting edges from lineage graph...");
-        if (isLineageOpened) {
-            pathsEndpoints = new ArrayList<>();
-            List<WebElement> paths = driver.findElements(By.xpath(EDGE));
-            LOGGER.info(paths.size() + " edges found");
-            for (WebElement path : paths) {
-                String[] coordinates = path.getAttribute("d").split("[MLC,]");
-                int x = 0, y, i = 0;
-                while (i < coordinates.length) {
-                    if (!coordinates[i].isEmpty()) {
-                        x = (int) Double.parseDouble(coordinates[i]);
-                        break;
-                    } else {
-                        i++;
-                    }
-                }
-                y = (int) Double.parseDouble(coordinates[i + 1]);
-                Point startPoint = new Point(x, y);
-                x = (int) Math.round(Double.parseDouble(coordinates[coordinates.length - 2]));
-                y = (int) Math.round(Double.parseDouble(coordinates[coordinates.length - 1]));
-                Point endPoint = new Point(x, y);
-                LOGGER.info("Edge " + startPoint + '→' + endPoint);
-                pathsEndpoints.add(new Point[]{startPoint, endPoint});
-            }
-        }
-        return pathsEndpoints;
-    }
-
-    /**
-     * @return common value for radius of every vertex (circle) on the graph
-     */
-    public int getCircleRadius() {
-        LOGGER.info("Getting value of vertex radius...");
-        WebElement circle = driver.findElements(By.xpath(VERTICES)).get(0);
-        return Integer.parseInt(circle.getAttribute("r"));
-    }
-
-    /**
-     * Finds a vertex on the graph by its name and evaluates its coordinates as a 2D point.
-     * @param vertex the name of the vertex whose point is needed
-     * @return Point(x,y) object
-     */
-    public Point getVertexEndpoint(String vertex) {
-        // get the circle of the start vertex
-        LOGGER.info("Getting vertex coordinates...");
-        WebElement block = driver.findElement(By.xpath(String.format(VERTEX_BLOCK_TEMPLATE, vertex)));
-        String attribute = block.getAttribute("transform");
-        attribute = attribute.replaceAll("[a-zA-Z]", "");
-        String[] numbers = attribute.replaceAll("[()]", "").split(",");
-        return new Point(Integer.parseInt(numbers[0]), Integer.parseInt(numbers[1]));
-    }
-
-    /**
-     * Returns status of instance from instances panel.
-     * @param instanceDate date stamp of instance
-     * @return status of instance from instances panel
-     */
-    public String getInstanceStatus(String instanceDate) {
-        waitForInstancesPanel();
-        LOGGER.info("Getting status of " + instanceDate + " instance");
-        List<WebElement> status =
-            driver.findElements(By.xpath(String.format(INSTANCE_STATUS_TEMPLATE, instanceDate)));
-        if (status.isEmpty()) {
-            return null;
-        } else {
-            return status.get(0).getAttribute("class").replace("instance-icons instance-link-", "");
-        }
-    }
-
-    /**
-     * Checks if 'Lineage' link is present on instances panel.
-     * @param instanceDate date stamp of instance
-     * @return true if link is present
-     */
-    public boolean isLineageLinkPresent(String instanceDate) {
-        waitForInstancesPanel();
-        LOGGER.info("Checking if 'Lineage' link is present for " + instanceDate);
-        List<WebElement> lineage =
-            driver.findElements(By.xpath(String.format(LINEAGE_LINK_TEMPLATE, instanceDate)));
-        return !lineage.isEmpty();
-    }
-
-    private void waitForInstancesPanel() {
-        waitForElement(INSTANCES_PANEL, DEFAULT_TIMEOUT, "Instances panel didn't appear");
-    }
-
-    /**
-     * Checks whether vertex is terminal or not.
-     * @param vertexName name of vertex
-     * @return whether it is terminal or not
-     */
-    public boolean isTerminal(String vertexName) {
-        LOGGER.info("Checking if " + vertexName + " is 'terminal' instance");
-        waitForElement(String.format(VERTEX_TEMPLATE, vertexName), DEFAULT_TIMEOUT,
-            "Vertex not found");
-        WebElement vertex = driver.findElement(By.xpath(String.format(VERTEX_TEMPLATE, vertexName)));
-        String vertexClass = vertex.getAttribute("class");
-        return vertexClass.contains("lineage-node-terminal");
-    }
-}

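The ProcessPage helpers removed above were exercised roughly as follows from the regression tests. This is only a usage sketch under assumptions: the WebDriver and ColoHelper are expected to be initialized by the test harness, the page is assumed to be already open, and the process name and nominal time are placeholders.

import org.apache.falcon.regression.core.helpers.ColoHelper;
import org.apache.falcon.regression.ui.pages.ProcessPage;
import org.openqa.selenium.WebDriver;
import org.testng.Assert;

import java.util.HashMap;
import java.util.List;

public final class LineageUsageSketch {
    private LineageUsageSketch() {
    }

    // driver and helper are assumed to be prepared by the test harness;
    // "sample-process" and the nominal time are placeholders.
    public static void checkLineage(WebDriver driver, ColoHelper helper) {
        ProcessPage processPage = new ProcessPage(driver, helper, "sample-process");
        String nominalTime = "2016-03-01T09:00Z";
        if (processPage.isLineageLinkPresent(nominalTime)) {
            processPage.openLineage(nominalTime);
            HashMap<String, List<String>> vertices = processPage.getAllVertices();
            Assert.assertNotNull(vertices, "Lineage graph should expose its vertices");
            processPage.closeLineage();
        }
    }
}
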
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/AbstractSearchPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/AbstractSearchPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/AbstractSearchPage.java
deleted file mode 100644
index ab73092..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/AbstractSearchPage.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import com.google.common.util.concurrent.SimpleTimeLimiter;
-import com.google.common.util.concurrent.TimeLimiter;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.commons.lang3.tuple.MutablePair;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.ui.pages.Page;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.JavascriptExecutor;
-import org.openqa.selenium.NoSuchElementException;
-import org.openqa.selenium.TimeoutException;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.PageFactory;
-import org.openqa.selenium.support.ui.ExpectedCondition;
-import org.openqa.selenium.support.ui.Select;
-import org.openqa.selenium.support.ui.WebDriverWait;
-import org.testng.Assert;
-
-import javax.annotation.Nullable;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-
-/** Parent page object for all the Search UI pages. */
-public abstract class AbstractSearchPage extends Page {
-
-    public static final String UI_URL = MerlinConstants.PRISM_URL;
-    private static final Logger LOGGER = Logger.getLogger(AbstractSearchPage.class);
-    public static final int PAGELOAD_TIMEOUT_THRESHOLD = 10;
-    public static final int ALERT_LIFETIME = 3000;
-
-    public AbstractSearchPage(WebDriver driver) {
-        super(driver);
-        waitForAngularToFinish();
-        LOGGER.info("Going to initialize Page Header.");
-        pageHeader = PageFactory.initElements(driver, PageHeader.class);
-        LOGGER.info("Initialization done.");
-    }
-
-    private PageHeader pageHeader;
-
-    @FindBy(className = "mainUIView")
-    protected WebElement mainUI;
-
-    public PageHeader getPageHeader() {
-        return pageHeader;
-    }
-
-    protected WebElement getParentElement(WebElement element) {
-        return element.findElement(By.xpath(".."));
-    }
-
-    /**
-     * A rough check to make sure that we are indeed on the correct page.
-     */
-    public abstract void checkPage();
-
-    // Utility method to enter the data slowly on an element
-    public static void sendKeysSlowly(WebElement webElement, String data) {
-        for (String str : data.split("")) {
-            webElement.sendKeys(str);
-        }
-    }
-
-    public static void clearAndSet(WebElement webElement, String val) {
-        webElement.clear();
-        webElement.sendKeys(val);
-    }
-
-    public static void clearAndSetSlowly(WebElement webElement, String val) {
-        webElement.clear();
-        sendKeysSlowly(webElement, val);
-    }
-
-    protected WebElement findElementByNgModel(String ngModelName) {
-        // trying to get an xpath that looks like: "//*[@ng-model='UIModel.retry.policy']"
-        final String xpathExpression = "//*[@ng-model='" + ngModelName + "']";
-        final List<WebElement> webElements = driver.findElements(By.xpath(xpathExpression));
-        Assert.assertEquals(webElements.size(), 1, "Element is not unique for ng-model: " + ngModelName);
-        return webElements.get(0);
-    }
-
-    protected void selectNgModelByVisibleText(String ngModelName, String visibleText) {
-        final WebElement webElement = findElementByNgModel(ngModelName);
-        final Select select = new Select(webElement);
-        select.selectByVisibleText(visibleText);
-    }
-
-    protected void clearAndSetByNgModel(String ngModelName, String value) {
-        final WebElement webElement = findElementByNgModel(ngModelName);
-        clearAndSet(webElement, value);
-    }
-
-    protected void clearAndSetSlowlyByNgModel(String ngModelName, String value) {
-        final WebElement webElement = findElementByNgModel(ngModelName);
-        clearAndSetSlowly(webElement, value);
-    }
-
-    protected void clickById(String id) {
-        final List<WebElement> webElements = driver.findElements(By.id(id));
-        Assert.assertEquals(webElements.size(), 1, "Element is not unique.");
-        webElements.get(0).click();
-    }
-
-    protected void clickByNgModel(String ngModelName) {
-        final WebElement webElement = findElementByNgModel(ngModelName);
-        webElement.click();
-    }
-
-    // Utility method to get Dropdown Values
-    public List<String> getDropdownValues(Select element){
-        List<WebElement> allOptions = element.getOptions();
-        List<String> values = new ArrayList<>();
-        for (WebElement option:allOptions){
-            values.add(option.getText());
-        }
-        return values;
-    }
-
-
-    protected void waitForAngularToFinish() {
-        final String javaScript = "return (window.angular != null) && "
-            + "(angular.element(document).injector() != null) && "
-            + "(angular.element(document).injector().get('$http').pendingRequests.length === 0)";
-        boolean isLoaded = false;
-        for (int i = 0; i < PAGELOAD_TIMEOUT_THRESHOLD && !isLoaded; i++) {
-            TimeLimiter timeLimiter = new SimpleTimeLimiter();
-            final JavascriptExecutor proxyJsExecutor =
-                timeLimiter.newProxy((JavascriptExecutor) driver, JavascriptExecutor.class, 10, TimeUnit.SECONDS);
-            try {
-                final Object output = proxyJsExecutor.executeScript(javaScript);
-                isLoaded = Boolean.valueOf(output.toString());
-            } catch (Exception e) {
-                LOGGER.info("Checking of pending request failed because of: " + ExceptionUtils.getFullStackTrace(e));
-            }
-            LOGGER.info(i+1 + ". waiting on angular to finish.");
-            TimeUtil.sleepSeconds(1);
-        }
-        LOGGER.info("angular is done continuing...");
-    }
-
-    public String getActiveAlertText() {
-        if (waitForAlert()) {
-            waitForAngularToFinish();
-            String script = "return $('div.messages.notifs > div:last-child').text();";
-            String message = (String)((JavascriptExecutor)driver).executeScript(script);
-            return message.trim();
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Wait for an active alert and check its lifetime (the period during which the alert is displayed).
-     */
-    public void validateAlertLifetime() {
-        final WebElement alertsBlock = driver.findElement(By.xpath("//div[@class='messages notifs']"));
-        try {
-            final MutablePair<Long, Long> pair = new MutablePair<>(Long.MAX_VALUE, Long.MAX_VALUE);
-            // wait 5 seconds for alert to start blinking and record time of first blink
-            new WebDriverWait(driver, 5, 100).until(new ExpectedCondition<Boolean>() {
-                @Nullable
-                @Override
-                public Boolean apply(WebDriver webDriver) {
-                    String style = alertsBlock.getAttribute("style");
-                    if ((style.contains("opacity") && !style.contains("opacity: 1;"))
-                            || style.contains("display: block;")) {
-                        pair.setLeft(System.currentTimeMillis());
-                        return true;
-                    }
-                    return false;
-                }
-            });
-            // poll until the alert disappears and record the time when it did
-            for (int i = 0; i < ALERT_LIFETIME + 3000; i += 100) {
-                String style = alertsBlock.getAttribute("style");
-                if (style.contains("display: none;")) {
-                    pair.setRight(Math.min(System.currentTimeMillis(), pair.getRight()));
-                } else {
-                    pair.setRight(Long.MAX_VALUE);
-                }
-                TimeUtil.sleepSeconds(0.1);
-            }
-            long diff = pair.getRight() - pair.getLeft();
-            LOGGER.info(String.format("Alert was live %d millis.", pair.getRight() - pair.getLeft()));
-            Assert.assertTrue(ALERT_LIFETIME <= diff, "Alert was present for too short period of time");
-        } catch (TimeoutException e) {
-            Assert.fail("Alert didn't appear in 5 seconds.");
-        }
-    }
-
-    /**
-     * Wait for active alert.
-     * @return true if alert is present
-     */
-    protected boolean waitForAlert() {
-        final WebElement alertsBlock = driver.findElement(By.xpath("//div[@class='messages notifs']"));
-        try {
-            new WebDriverWait(driver, 5).until(new ExpectedCondition<Boolean>() {
-                @Nullable
-                @Override
-                public Boolean apply(WebDriver webDriver) {
-                    String style = alertsBlock.getAttribute("style");
-                    return (style.contains("opacity") && !style.contains("opacity: 1;"))
-                            || style.contains("display: block;");
-                }
-            });
-            return true;
-        } catch (TimeoutException e) {
-            return false;
-        }
-    }
-
-    /**
-     * Performs simple check of element presence.
-     */
-    public WebElement getElementOrNull(String xpath) {
-        try {
-            return driver.findElement(By.xpath(xpath));
-        } catch (NoSuchElementException ignored) {
-            return null;
-        }
-    }
-
-
-    /**
-     * Method imitates click on check box. If click is not performed method retries the click.
-     * @param expectedState whether the check box is expected to be selected after the click.
-     */
-    protected void clickCheckBoxSecurely(WebElement checkBox, boolean expectedState) {
-        double gap = 0.5;
-        for (int attempt = 1; attempt <= (DEFAULT_TIMEOUT / gap); attempt++) {
-            LOGGER.info("Attempt to click a check box: " + attempt);
-            checkBox.click();
-            if (checkBox.isSelected() == expectedState) {
-                return;
-            }
-            TimeUtil.sleepSeconds(gap);
-        }
-        Assert.fail("Check box state was not changed even in " + DEFAULT_TIMEOUT + " seconds.");
-    }
-
-}

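The waitForAngularToFinish() helper removed above polls a JavaScript expression that checks Angular's pending $http requests. The same readiness check can also be expressed as a single WebDriverWait condition; the sketch below is illustrative only and assumes a 10-second timeout.

import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.WebDriverWait;

public final class AngularWaitSketch {
    private AngularWaitSketch() {
    }

    // Same script as in the removed helper: true once Angular has no pending $http requests.
    private static final String NO_PENDING_REQUESTS =
        "return (window.angular != null) && "
        + "(angular.element(document).injector() != null) && "
        + "(angular.element(document).injector().get('$http').pendingRequests.length === 0)";

    /** Blocks until Angular reports no pending $http requests (timeout assumed to be 10 seconds). */
    public static void waitForAngular(WebDriver driver) {
        new WebDriverWait(driver, 10).until(new ExpectedCondition<Boolean>() {
            @Override
            public Boolean apply(WebDriver webDriver) {
                Object result = ((JavascriptExecutor) webDriver).executeScript(NO_PENDING_REQUESTS);
                return result != null && Boolean.parseBoolean(result.toString());
            }
        });
    }
}
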

[39/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/FeedEntityParser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/FeedEntityParser.java b/common/src/main/java/org/apache/falcon/entity/parser/FeedEntityParser.java
deleted file mode 100644
index c942862..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/FeedEntityParser.java
+++ /dev/null
@@ -1,719 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.catalog.CatalogServiceFactory;
-import org.apache.falcon.entity.CatalogStorage;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.FileSystemStorage;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityGraph;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.ACL;
-import org.apache.falcon.entity.v0.feed.Extract;
-import org.apache.falcon.entity.v0.feed.ExtractMethod;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.MergeType;
-import org.apache.falcon.entity.v0.feed.Properties;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.feed.Sla;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.group.FeedGroup;
-import org.apache.falcon.group.FeedGroupMap;
-import org.apache.falcon.service.LifecyclePolicyMap;
-import org.apache.falcon.util.DateUtil;
-import org.apache.falcon.util.HadoopQueueUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TimeZone;
-
-/**
- * Parser that parses feed entity definition.
- */
-public class FeedEntityParser extends EntityParser<Feed> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(FeedEntityParser.class);
-
-    public FeedEntityParser() {
-        super(EntityType.FEED);
-    }
-
-    @Override
-    public void validate(Feed feed) throws FalconException {
-        if (feed.getTimezone() == null) {
-            feed.setTimezone(TimeZone.getTimeZone("UTC"));
-        }
-
-        if (feed.getClusters() == null) {
-            throw new ValidationException("Feed should have at least one cluster");
-        }
-
-        validateLifecycle(feed);
-        validateACL(feed);
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            validateEntityExists(EntityType.CLUSTER, cluster.getName());
-
-            // Optional end date
-            if (cluster.getValidity().getEnd() == null) {
-                cluster.getValidity().setEnd(DateUtil.NEVER);
-            }
-
-            validateClusterValidity(cluster.getValidity().getStart(), cluster.getValidity().getEnd(),
-                    cluster.getName());
-            validateClusterHasRegistry(feed, cluster);
-            validateFeedCutOffPeriod(feed, cluster);
-            if (FeedHelper.isImportEnabled(cluster)) {
-                validateEntityExists(EntityType.DATASOURCE, FeedHelper.getImportDatasourceName(cluster));
-                validateFeedExtractionType(feed, cluster);
-                validateFeedImportArgs(cluster);
-                validateFeedImportFieldExcludes(cluster);
-            }
-            if (FeedHelper.isExportEnabled(cluster)) {
-                validateEntityExists(EntityType.DATASOURCE, FeedHelper.getExportDatasourceName(cluster));
-                validateFeedExportArgs(cluster);
-                validateFeedExportFieldExcludes(cluster);
-            }
-        }
-
-        validateFeedStorage(feed);
-        validateFeedPath(feed);
-        validateFeedPartitionExpression(feed);
-        validateFeedGroups(feed);
-        validateFeedSLA(feed);
-        validateProperties(feed);
-        validateHadoopQueue(feed);
-
-        // Seems like a good enough entity object for a new one
-        // But is this an update?
-
-        Feed oldFeed = ConfigurationStore.get().get(EntityType.FEED, feed.getName());
-        if (oldFeed == null) {
-            return; // Not an update case
-        }
-
-        // Is actually an update. Need to iterate over all the processes
-        // depending on this feed and see if they are valid with the new
-        // feed reference
-        EntityGraph graph = EntityGraph.get();
-        Set<Entity> referenced = graph.getDependents(oldFeed);
-        Set<Process> processes = findProcesses(referenced);
-        if (processes.isEmpty()) {
-            return;
-        }
-
-        ensureValidityFor(feed, processes);
-    }
-
-    private void validateLifecycle(Feed feed) throws FalconException {
-        LifecyclePolicyMap map = LifecyclePolicyMap.get();
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            if (FeedHelper.isLifecycleEnabled(feed, cluster.getName())) {
-                if (FeedHelper.getRetentionStage(feed, cluster.getName()) == null) {
-                    throw new ValidationException("Retention is a mandatory stage, didn't find it for cluster: "
-                            + cluster.getName());
-                }
-                validateRetentionFrequency(feed, cluster.getName());
-                for (String policyName : FeedHelper.getPolicies(feed, cluster.getName())) {
-                    map.get(policyName).validate(feed, cluster.getName());
-                }
-            }
-        }
-    }
-
-    private void validateRetentionFrequency(Feed feed, String clusterName) throws FalconException {
-        Frequency retentionFrequency = FeedHelper.getLifecycleRetentionFrequency(feed, clusterName);
-        Frequency feedFrequency = feed.getFrequency();
-        if (DateUtil.getFrequencyInMillis(retentionFrequency) < DateUtil.getFrequencyInMillis(feedFrequency)) {
-            throw new ValidationException("Retention can not be more frequent than data availability.");
-        }
-    }
-
-    private Set<Process> findProcesses(Set<Entity> referenced) {
-        Set<Process> processes = new HashSet<Process>();
-        for (Entity entity : referenced) {
-            if (entity.getEntityType() == EntityType.PROCESS) {
-                processes.add((Process) entity);
-            }
-        }
-        return processes;
-    }
-
-    private void validateFeedSLA(Feed feed) throws FalconException {
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            Sla clusterSla = FeedHelper.getSLA(cluster, feed);
-            if (clusterSla != null) {
-                Frequency slaLowExpression = clusterSla.getSlaLow();
-                ExpressionHelper evaluator = ExpressionHelper.get();
-                ExpressionHelper.setReferenceDate(new Date());
-                Date slaLow = new Date(evaluator.evaluate(slaLowExpression.toString(), Long.class));
-
-                Frequency slaHighExpression = clusterSla.getSlaHigh();
-                Date slaHigh = new Date(evaluator.evaluate(slaHighExpression.toString(), Long.class));
-
-                if (slaLow.after(slaHigh)) {
-                    throw new ValidationException("slaLow of Feed: " + slaLowExpression
-                            + "is greater than slaHigh: " + slaHighExpression
-                            + " for cluster: " + cluster.getName()
-                    );
-                }
-
-                // test that slaHigh is less than retention
-                Frequency retentionExpression = cluster.getRetention().getLimit();
-                Date retention = new Date(evaluator.evaluate(retentionExpression.toString(), Long.class));
-                if (slaHigh.after(retention)) {
-                    throw new ValidationException("slaHigh of Feed: " + slaHighExpression
-                            + " is greater than retention of the feed: " + retentionExpression
-                            + " for cluster: " + cluster.getName()
-                    );
-                }
-
-
-            }
-        }
-    }
-
-    private void validateFeedGroups(Feed feed) throws FalconException {
-        String[] groupNames = feed.getGroups() != null ? feed.getGroups().split(",") : new String[]{};
-        final Storage storage = FeedHelper.createStorage(feed);
-        String defaultPath = storage.getUriTemplate(LocationType.DATA);
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            final String uriTemplate = FeedHelper.createStorage(cluster, feed).getUriTemplate(LocationType.DATA);
-            if (!FeedGroup.getDatePattern(uriTemplate).equals(
-                    FeedGroup.getDatePattern(defaultPath))) {
-                throw new ValidationException("Feeds default path pattern: "
-                        + storage.getUriTemplate(LocationType.DATA)
-                        + ", does not match with cluster: "
-                        + cluster.getName()
-                        + " path pattern: "
-                        + uriTemplate);
-            }
-        }
-        for (String groupName : groupNames) {
-            FeedGroup group = FeedGroupMap.get().getGroupsMapping().get(groupName);
-            if (group != null && !group.canContainFeed(feed)) {
-                throw new ValidationException(
-                        "Feed " + feed.getName() + "'s frequency: " + feed.getFrequency().toString()
-                                + ", path pattern: " + storage
-                                + " does not match with group: " + group.getName() + "'s frequency: "
-                                + group.getFrequency()
-                                + ", date pattern: " + group.getDatePattern());
-            }
-        }
-    }
-
-    private void ensureValidityFor(Feed newFeed, Set<Process> processes) throws FalconException {
-        for (Process process : processes) {
-            try {
-                ensureValidityFor(newFeed, process);
-            } catch (FalconException e) {
-                throw new ValidationException(
-                        "Process " + process.getName() + " is not compatible " + "with changes to feed "
-                                + newFeed.getName(), e);
-            }
-        }
-    }
-
-    private void ensureValidityFor(Feed newFeed, Process process) throws FalconException {
-        for (org.apache.falcon.entity.v0.process.Cluster cluster : process.getClusters().getClusters()) {
-            String clusterName = cluster.getName();
-            if (process.getInputs() != null) {
-                for (Input input : process.getInputs().getInputs()) {
-                    if (!input.getFeed().equals(newFeed.getName())) {
-                        continue;
-                    }
-                    CrossEntityValidations.validateFeedDefinedForCluster(newFeed, clusterName);
-                    CrossEntityValidations.validateFeedRetentionPeriod(input.getStart(), newFeed, clusterName);
-                    CrossEntityValidations.validateInstanceRange(process, input, newFeed);
-
-                    validateInputPartition(newFeed, input);
-                }
-            }
-
-            if (process.getOutputs() != null) {
-                for (Output output : process.getOutputs().getOutputs()) {
-                    if (!output.getFeed().equals(newFeed.getName())) {
-                        continue;
-                    }
-                    CrossEntityValidations.validateFeedDefinedForCluster(newFeed, clusterName);
-                    CrossEntityValidations.validateInstance(process, output, newFeed);
-                }
-            }
-            LOG.debug("Verified and found {} to be valid for new definition of {}",
-                    process.getName(), newFeed.getName());
-        }
-    }
-
-    private void validateInputPartition(Feed newFeed, Input input) throws FalconException {
-        if (input.getPartition() == null) {
-            return;
-        }
-
-        final Storage.TYPE baseFeedStorageType = FeedHelper.getStorageType(newFeed);
-        if (baseFeedStorageType == Storage.TYPE.FILESYSTEM) {
-            CrossEntityValidations.validateInputPartition(input, newFeed);
-        } else if (baseFeedStorageType == Storage.TYPE.TABLE) {
-            throw new ValidationException("Input partitions are not supported for table storage: " + input.getName());
-        }
-    }
-
-    private void validateClusterValidity(Date start, Date end, String clusterName) throws FalconException {
-        try {
-            if (start.after(end)) {
-                throw new ValidationException("Feed start time: " + start + " cannot be after feed end time: " + end
-                        + " for cluster: " + clusterName);
-            }
-        } catch (ValidationException e) {
-            throw new ValidationException(e);
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    private void validateFeedCutOffPeriod(Feed feed, Cluster cluster) throws FalconException {
-        ExpressionHelper evaluator = ExpressionHelper.get();
-
-        String feedRetention = cluster.getRetention().getLimit().toString();
-        long retentionPeriod = evaluator.evaluate(feedRetention, Long.class);
-
-        if (feed.getLateArrival() == null) {
-            LOG.debug("Feed's late arrival cut-off not set");
-            return;
-        }
-        String feedCutoff = feed.getLateArrival().getCutOff().toString();
-        long feedCutOffPeriod = evaluator.evaluate(feedCutoff, Long.class);
-
-        if (retentionPeriod < feedCutOffPeriod) {
-            throw new ValidationException(
-                    "Feed's retention limit: " + feedRetention + " of referenced cluster " + cluster.getName()
-                            + " should be more than feed's late arrival cut-off period: " + feedCutoff + " for feed: "
-                            + feed.getName());
-        }
-    }
-
-    private void validateFeedPartitionExpression(Feed feed) throws FalconException {
-        int numSourceClusters = 0, numTrgClusters = 0;
-        Set<String> clusters = new HashSet<String>();
-        for (Cluster cl : feed.getClusters().getClusters()) {
-            if (!clusters.add(cl.getName())) {
-                throw new ValidationException("Cluster: " + cl.getName()
-                        + " is defined more than once for feed: " + feed.getName());
-            }
-            if (cl.getType() == ClusterType.SOURCE) {
-                numSourceClusters++;
-            } else if (cl.getType() == ClusterType.TARGET) {
-                numTrgClusters++;
-            }
-        }
-
-        if (numTrgClusters >= 1 && numSourceClusters == 0) {
-            throw new ValidationException("Feed: " + feed.getName()
-                    + " should have atleast one source cluster defined");
-        }
-
-        int feedParts = feed.getPartitions() != null ? feed.getPartitions().getPartitions().size() : 0;
-
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-
-            if (cluster.getType() == ClusterType.SOURCE && numSourceClusters > 1 && numTrgClusters >= 1) {
-                String part = FeedHelper.normalizePartitionExpression(cluster.getPartition());
-                if (StringUtils.split(part, '/').length == 0) {
-                    throw new ValidationException(
-                            "Partition expression has to be specified for cluster " + cluster.getName()
-                                    + " as there are more than one source clusters");
-                }
-                validateClusterExpDefined(cluster);
-
-            } else if (cluster.getType() == ClusterType.TARGET) {
-
-                for (Cluster src : feed.getClusters().getClusters()) {
-                    if (src.getType() == ClusterType.SOURCE) {
-                        String part = FeedHelper.normalizePartitionExpression(src.getPartition(),
-                                cluster.getPartition());
-                        int numParts = StringUtils.split(part, '/').length;
-                        if (numParts > feedParts) {
-                            throw new ValidationException(
-                                    "Partition for " + src.getName() + " and " + cluster.getName()
-                                            + "clusters is more than the number of partitions defined in feed");
-                        }
-                    }
-                }
-
-                if (numTrgClusters > 1 && numSourceClusters >= 1) {
-                    validateClusterExpDefined(cluster);
-                }
-            }
-        }
-    }
-
-    private void validateClusterExpDefined(Cluster cl) throws FalconException {
-        if (cl.getPartition() == null) {
-            return;
-        }
-
-        org.apache.falcon.entity.v0.cluster.Cluster cluster = EntityUtil.getEntity(EntityType.CLUSTER, cl.getName());
-        String part = FeedHelper.normalizePartitionExpression(cl.getPartition());
-        if (FeedHelper.evaluateClusterExp(cluster, part).equals(part)) {
-            throw new ValidationException(
-                    "Alteast one of the partition tags has to be a cluster expression for cluster " + cl.getName());
-        }
-    }
-
-    /**
-     * Ensure table is already defined in the catalog registry.
-     * Does not matter for FileSystem storage.
-     */
-    private void validateFeedStorage(Feed feed) throws FalconException {
-        final Storage.TYPE baseFeedStorageType = FeedHelper.getStorageType(feed);
-        validateMultipleSourcesExist(feed, baseFeedStorageType);
-        validateUniformStorageType(feed, baseFeedStorageType);
-        validatePartitions(feed, baseFeedStorageType);
-        validateStorageExists(feed);
-    }
-
-    private void validateMultipleSourcesExist(Feed feed, Storage.TYPE baseFeedStorageType) throws FalconException {
-        if (baseFeedStorageType == Storage.TYPE.FILESYSTEM) {
-            return;
-        }
-
-        // validate that there is only one source cluster
-        int numberOfSourceClusters = 0;
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            if (cluster.getType() == ClusterType.SOURCE) {
-                numberOfSourceClusters++;
-            }
-        }
-
-        if (numberOfSourceClusters > 1) {
-            throw new ValidationException("Multiple sources are not supported for feed with table storage: "
-                    + feed.getName());
-        }
-    }
-
-    private void validateUniformStorageType(Feed feed, Storage.TYPE feedStorageType) throws FalconException {
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            Storage.TYPE feedClusterStorageType = FeedHelper.getStorageType(feed, cluster);
-
-            if (feedStorageType != feedClusterStorageType) {
-                throw new ValidationException("The storage type is not uniform for cluster: " + cluster.getName());
-            }
-        }
-    }
-
-    private void validateClusterHasRegistry(Feed feed, Cluster cluster) throws FalconException {
-        Storage.TYPE feedClusterStorageType = FeedHelper.getStorageType(feed, cluster);
-
-        if (feedClusterStorageType != Storage.TYPE.TABLE) {
-            return;
-        }
-
-        org.apache.falcon.entity.v0.cluster.Cluster clusterEntity = EntityUtil.getEntity(EntityType.CLUSTER,
-                cluster.getName());
-        if (ClusterHelper.getRegistryEndPoint(clusterEntity) == null) {
-            throw new ValidationException("Cluster should have registry interface defined: " + clusterEntity.getName());
-        }
-    }
-
-    private void validatePartitions(Feed feed, Storage.TYPE storageType) throws  FalconException {
-        if (storageType == Storage.TYPE.TABLE && feed.getPartitions() != null) {
-            throw new ValidationException("Partitions are not supported for feeds with table storage. "
-                    + "It should be defined as part of the table URI. "
-                    + feed.getName());
-        }
-    }
-
-    private void validateStorageExists(Feed feed) throws FalconException {
-        StringBuilder buffer = new StringBuilder();
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            org.apache.falcon.entity.v0.cluster.Cluster clusterEntity =
-                    EntityUtil.getEntity(EntityType.CLUSTER, cluster.getName());
-            if (!EntityUtil.responsibleFor(clusterEntity.getColo())) {
-                continue;
-            }
-
-            final Storage storage = FeedHelper.createStorage(cluster, feed);
-            // this is only true for table, filesystem always returns true
-            if (storage.getType() == Storage.TYPE.FILESYSTEM) {
-                continue;
-            }
-
-            CatalogStorage catalogStorage = (CatalogStorage) storage;
-            Configuration clusterConf = ClusterHelper.getConfiguration(clusterEntity);
-            if (!CatalogServiceFactory.getCatalogService().tableExists(
-                    clusterConf, catalogStorage.getCatalogUrl(),
-                    catalogStorage.getDatabase(), catalogStorage.getTable())) {
-                buffer.append("Table [")
-                        .append(catalogStorage.getTable())
-                        .append("] does not exist for feed: ")
-                        .append(feed.getName())
-                        .append(" in cluster: ")
-                        .append(cluster.getName());
-            }
-        }
-
-        if (buffer.length() > 0) {
-            throw new ValidationException(buffer.toString());
-        }
-    }
-
-    /**
-     * Validate ACL if authorization is enabled.
-     *
-     * @param feed Feed entity
-     * @throws ValidationException
-     */
-    protected void validateACL(Feed feed) throws FalconException {
-        if (isAuthorizationDisabled) {
-            return;
-        }
-
-        final ACL feedACL = feed.getACL();
-        validateACLOwnerAndGroup(feedACL);
-        try {
-            authorize(feed.getName(), feedACL);
-        } catch (AuthorizationException e) {
-            throw new ValidationException(e);
-        }
-
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            org.apache.falcon.entity.v0.cluster.Cluster clusterEntity =
-                    EntityUtil.getEntity(EntityType.CLUSTER, cluster.getName());
-            if (!EntityUtil.responsibleFor(clusterEntity.getColo())) {
-                continue;
-            }
-
-            final Storage storage = FeedHelper.createStorage(cluster, feed);
-            try {
-                storage.validateACL(feedACL);
-            } catch(FalconException e) {
-                throw new ValidationException(e);
-            }
-        }
-    }
-
-    /**
-     * Validate Hadoop cluster queue names specified in the Feed entity definition.
-     *
-     * First looks for the queue name specified in the Lifecycle stage, then for the queueName property,
-     * and checks its validity against the Hadoop cluster scheduler info.
-     *
-     * The Hadoop cluster queue is validated only if the YARN RM web address is specified in the
-     * cluster entity properties.
-     *
-     * Throws an exception if the specified queue name is not a valid Hadoop cluster queue.
-     *
-     * @param feed
-     * @throws FalconException
-     */
-
-    protected void validateHadoopQueue(Feed feed) throws FalconException {
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            Set<String> feedQueue = getQueueNamesUsedInFeed(feed, cluster);
-
-            org.apache.falcon.entity.v0.cluster.Cluster clusterEntity =
-                    EntityUtil.getEntity(EntityType.CLUSTER, cluster.getName());
-
-            String rmURL = ClusterHelper.getPropertyValue(clusterEntity, "yarn.resourcemanager.webapp.https.address");
-            if (StringUtils.isBlank(rmURL)) {
-                rmURL = ClusterHelper.getPropertyValue(clusterEntity, "yarn.resourcemanager.webapp.address");
-            }
-
-            if (StringUtils.isNotBlank(rmURL)) {
-                LOG.info("Fetching hadoop queue names from cluster {} RM URL {}", cluster.getName(), rmURL);
-                Set<String> queueNames = HadoopQueueUtil.getHadoopClusterQueueNames(rmURL);
-
-                for (String q: feedQueue) {
-                    if (queueNames.contains(q)) {
-                        LOG.info("Validated presence of retention queue specified in feed - {}", q);
-                    } else {
-                        String strMsg = String.format("The hadoop queue name %s specified "
-                                + "for cluster %s is invalid.", q, cluster.getName());
-                        LOG.info(strMsg);
-                        throw new FalconException(strMsg);
-                    }
-                }
-            }
-        }
-    }
-
-    protected Set<String> getQueueNamesUsedInFeed(Feed feed, Cluster cluster) throws FalconException {
-        Set<String> queueList = new HashSet<>();
-        addToQueueList(FeedHelper.getRetentionQueue(feed, cluster), queueList);
-        if (cluster.getType() == ClusterType.TARGET) {
-            addToQueueList(FeedHelper.getReplicationQueue(feed, cluster), queueList);
-        }
-        return queueList;
-    }
-
-    private void addToQueueList(String queueName, Set<String> queueList) {
-        // only record non-blank queue names; blank means no queue was configured
-        if (StringUtils.isNotBlank(queueName)) {
-            queueList.add(queueName);
-        }
-    }
-
-    protected void validateProperties(Feed feed) throws ValidationException {
-        Properties properties = feed.getProperties();
-        if (properties == null) {
-            return; // feed has no properties to validate.
-        }
-
-        List<Property> propertyList = feed.getProperties().getProperties();
-        HashSet<String> propertyKeys = new HashSet<String>();
-        for (Property prop : propertyList) {
-            if (StringUtils.isBlank(prop.getName())) {
-                throw new ValidationException("Property name and value cannot be empty for Feed : "
-                        + feed.getName());
-            }
-            if (!propertyKeys.add(prop.getName())) {
-                throw new ValidationException("Multiple properties with same name found for Feed : "
-                        + feed.getName());
-            }
-        }
-    }
-
-    /**
-     * Validate if FileSystem based feed contains location type data.
-     *
-     * @param feed Feed entity
-     * @throws FalconException
-     */
-    private void validateFeedPath(Feed feed) throws FalconException {
-        if (FeedHelper.getStorageType(feed) == Storage.TYPE.TABLE) {
-            return;
-        }
-
-        for (Cluster cluster : feed.getClusters().getClusters()) {
-            List<Location> locations = FeedHelper.getLocations(cluster, feed);
-            Location dataLocation = FileSystemStorage.getLocation(locations, LocationType.DATA);
-
-            if (dataLocation == null) {
-                throw new ValidationException(feed.getName() + " is a FileSystem based feed "
-                    + "but it doesn't contain location type - data in cluster " + cluster.getName());
-            }
-
-        }
-    }
-
-    /**
-     * Validate extraction and merge type combination. Currently supported combo:
-     *
-     * ExtractionType = FULL and MergeType = SNAPSHOT.
-     * ExtractionType = INCREMENTAL and MergeType = APPEND.
-     *
-     * @param feed Feed entity
-     * @param cluster Cluster referenced in the Feed definition
-     * @throws FalconException
-     */
-
-    private void validateFeedExtractionType(Feed feed, Cluster cluster) throws FalconException {
-        Extract extract = cluster.getImport().getSource().getExtract();
-
-        if (ExtractMethod.FULL == extract.getType())  {
-            if ((MergeType.SNAPSHOT != extract.getMergepolicy())
-                    || (extract.getDeltacolumn() != null)) {
-                throw new ValidationException(String.format("Feed %s is using FULL "
-                        + "extract method but specifies either a superfluous "
-                        + "deltacolumn  or a mergepolicy other than snapshot", feed.getName()));
-            }
-        }  else {
-            throw new ValidationException(String.format("Feed %s is using unsupported "
-                    + "extraction mechanism %s", feed.getName(), extract.getType().value()));
-        }
-    }
-
-    /**
-     * Validate import arguments.
-     * @param feedCluster Cluster referenced in the feed
-     */
-    private void validateFeedImportArgs(Cluster feedCluster) throws FalconException {
-        Map<String, String> args = FeedHelper.getImportArguments(feedCluster);
-        validateSqoopArgs(args);
-    }
-
-    /**
-     * Validate sqoop arguments.
-     * @param args Sqoop arguments as a key/value map
-     */
-    private void validateSqoopArgs(Map<String, String> args) throws FalconException {
-        int numMappers = 1;
-        if (args.containsKey("--num-mappers")) {
-            numMappers = Integer.parseInt(args.get("--num-mappers"));
-        }
-        if ((numMappers > 1) && (!args.containsKey("--split-by"))) {
-            throw new ValidationException(String.format("Feed import expects "
-                    + "--split-by column when --num-mappers > 1"));
-        }
-    }
-
-    private void validateFeedImportFieldExcludes(Cluster feedCluster) throws FalconException {
-        if (FeedHelper.isFieldExcludes(feedCluster.getImport().getSource())) {
-            throw new ValidationException(String.format("Field excludes are not supported "
-                    + "currently in Feed import policy"));
-        }
-    }
-
-    /**
-     * Validate export arguments.
-     * @param feedCluster Cluster referenced in the feed
-     */
-    private void validateFeedExportArgs(Cluster feedCluster) throws FalconException {
-        Map<String, String> args = FeedHelper.getExportArguments(feedCluster);
-        Map<String, String> validArgs = new HashMap<>();
-        validArgs.put("--num-mappers", "");
-        validArgs.put("--update-key" , "");
-        validArgs.put("--input-null-string", "");
-        validArgs.put("--input-null-non-string", "");
-
-        for(Map.Entry<String, String> e : args.entrySet()) {
-            if (!validArgs.containsKey(e.getKey())) {
-                throw new ValidationException(String.format("Feed export argument %s is invalid.", e.getKey()));
-            }
-        }
-    }
-
-    private void validateFeedExportFieldExcludes(Cluster feedCluster) throws FalconException {
-        if (FeedHelper.isFieldExcludes(feedCluster.getExport().getTarget())) {
-            throw new ValidationException(String.format("Field excludes are not supported "
-                    + "currently in Feed import policy"));
-        }
-    }
-
-}

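The SLA validation removed above enforces an ordering between the feed's slaLow, slaHigh and retention expressions, all evaluated against a common reference date. Below is a minimal standalone sketch of that check, assuming the same ExpressionHelper semantics the parser relies on; the class and method names are illustrative.

import java.util.Date;

import org.apache.falcon.FalconException;
import org.apache.falcon.entity.v0.Frequency;
import org.apache.falcon.expression.ExpressionHelper;

public final class SlaOrderingSketch {
    private SlaOrderingSketch() {
    }

    /**
     * Returns true when the SLA windows are consistent: slaLow is not after slaHigh,
     * and slaHigh is not after the retention limit, with every expression evaluated
     * against the same reference date (now), mirroring validateFeedSLA().
     */
    public static boolean isSlaConsistent(Frequency slaLow, Frequency slaHigh, Frequency retention)
        throws FalconException {
        ExpressionHelper evaluator = ExpressionHelper.get();
        ExpressionHelper.setReferenceDate(new Date());
        Date low = new Date(evaluator.evaluate(slaLow.toString(), Long.class));
        Date high = new Date(evaluator.evaluate(slaHigh.toString(), Long.class));
        Date limit = new Date(evaluator.evaluate(retention.toString(), Long.class));
        return !low.after(high) && !high.after(limit);
    }
}
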
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/ProcessEntityParser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/ProcessEntityParser.java b/common/src/main/java/org/apache/falcon/entity/parser/ProcessEntityParser.java
deleted file mode 100644
index 16fd8b3..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/ProcessEntityParser.java
+++ /dev/null
@@ -1,369 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.process.Properties;
-import org.apache.falcon.entity.v0.process.Property;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.LateInput;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.util.DateUtil;
-import org.apache.falcon.util.HadoopQueueUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TimeZone;
-
-/**
- * Concrete Parser which has XML parsing and validation logic for Process XML.
- */
-public class ProcessEntityParser extends EntityParser<Process> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ProcessEntityParser.class);
-
-    public ProcessEntityParser() {
-        super(EntityType.PROCESS);
-    }
-
-    @Override
-    public void validate(Process process) throws FalconException {
-        if (process.getTimezone() == null) {
-            process.setTimezone(TimeZone.getTimeZone("UTC"));
-        }
-
-        validateACL(process);
-        // check if dependent entities exists
-        Set<String> clusters = new HashSet<String>();
-        for (org.apache.falcon.entity.v0.process.Cluster cluster : process.getClusters().getClusters()) {
-            String clusterName = cluster.getName();
-            if (!clusters.add(cluster.getName())) {
-                throw new ValidationException("Cluster: " + cluster.getName()
-                        + " is defined more than once for process: " + process.getName());
-            }
-            validateEntityExists(EntityType.CLUSTER, clusterName);
-
-            // Optional end_date
-            if (cluster.getValidity().getEnd() == null) {
-                cluster.getValidity().setEnd(DateUtil.NEVER);
-            }
-
-            validateProcessValidity(cluster.getValidity().getStart(), cluster.getValidity().getEnd());
-            validateHDFSPaths(process, clusterName);
-            validateProperties(process);
-
-            if (process.getInputs() != null) {
-                for (Input input : process.getInputs().getInputs()) {
-                    validateEntityExists(EntityType.FEED, input.getFeed());
-                    Feed feed = ConfigurationStore.get().get(EntityType.FEED, input.getFeed());
-                    CrossEntityValidations.validateFeedDefinedForCluster(feed, clusterName);
-                    CrossEntityValidations.validateFeedRetentionPeriod(input.getStart(), feed, clusterName);
-                    CrossEntityValidations.validateInstanceRange(process, input, feed);
-                    validateInputPartition(input, feed);
-                    validateOptionalInputsForTableStorage(feed, input);
-                }
-            }
-
-            if (process.getOutputs() != null) {
-                for (Output output : process.getOutputs().getOutputs()) {
-                    validateEntityExists(EntityType.FEED, output.getFeed());
-                    Feed feed = ConfigurationStore.get().get(EntityType.FEED, output.getFeed());
-                    CrossEntityValidations.validateFeedDefinedForCluster(feed, clusterName);
-                    CrossEntityValidations.validateInstance(process, output, feed);
-                }
-            }
-        }
-        validateDatasetName(process.getInputs(), process.getOutputs());
-        validateLateInputs(process);
-        validateProcessSLA(process);
-        validateHadoopQueue(process);
-    }
-
-
-    private void validateProcessSLA(Process process) throws FalconException {
-        if (process.getSla() != null) {
-            ExpressionHelper evaluator = ExpressionHelper.get();
-            ExpressionHelper.setReferenceDate(new Date());
-            Frequency shouldStartExpression = process.getSla().getShouldStartIn();
-            Frequency shouldEndExpression = process.getSla().getShouldEndIn();
-            Frequency timeoutExpression = process.getTimeout();
-
-            if (shouldStartExpression != null){
-                Date shouldStart = new Date(evaluator.evaluate(shouldStartExpression.toString(), Long.class));
-
-                if (shouldEndExpression != null) {
-                    Date shouldEnd = new Date(evaluator.evaluate(shouldEndExpression.toString(), Long.class));
-                    if (shouldStart.after(shouldEnd)) {
-                        throw new ValidationException("shouldStartIn of Process: " + shouldStartExpression
-                                + "is greater than shouldEndIn: "
-                                + shouldEndExpression);
-                    }
-                }
-
-                if (timeoutExpression != null) {
-                    Date timeout = new Date(evaluator.evaluate(timeoutExpression.toString(), Long.class));
-                    if (timeout.before(shouldStart)) {
-                        throw new ValidationException("shouldStartIn of Process: " + shouldStartExpression
-                                + " is greater than timeout: " + process.getTimeout());
-                    }
-                }
-            }
-        }
-    }
-    /**
-     * Validate if the user submitting this entity has access to the specific dirs on HDFS.
-     *
-     * @param process process
-     * @param clusterName cluster the process is materialized on
-     * @throws FalconException
-     */
-    private void validateHDFSPaths(Process process, String clusterName) throws FalconException {
-        org.apache.falcon.entity.v0.cluster.Cluster cluster =
-                ConfigurationStore.get().get(EntityType.CLUSTER, clusterName);
-
-        if (!EntityUtil.responsibleFor(cluster.getColo())) {
-            return;
-        }
-
-        String workflowPath = process.getWorkflow().getPath();
-        String libPath = process.getWorkflow().getLib();
-        String nameNode = getNameNode(cluster);
-        try {
-            Configuration configuration = ClusterHelper.getConfiguration(cluster);
-            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(configuration);
-            if (!fs.exists(new Path(workflowPath))) {
-                throw new ValidationException(
-                        "Workflow path: " + workflowPath + " does not exists in HDFS: " + nameNode);
-            }
-
-            if (StringUtils.isNotBlank(libPath)) {
-                String[] libPaths = libPath.split(EntityUtil.WF_LIB_SEPARATOR);
-                for (String path : libPaths) {
-                    if (!fs.exists(new Path(path))) {
-                        throw new ValidationException("Lib path: " + path + " does not exists in HDFS: " + nameNode);
-                    }
-                }
-            }
-        } catch (IOException e) {
-            throw new FalconException("Error validating workflow path " + workflowPath, e);
-        }
-    }
-
-    private String getNameNode(Cluster cluster) throws ValidationException {
-        // cluster should never be null as it is validated while submitting feeds.
-        if (new Path(ClusterHelper.getStorageUrl(cluster)).toUri().getScheme() == null) {
-            throw new ValidationException(
-                    "Cannot get valid nameNode scheme from write interface of cluster: " + cluster.getName());
-        }
-        return ClusterHelper.getStorageUrl(cluster);
-    }
-
-    private void validateProcessValidity(Date start, Date end) throws FalconException {
-        try {
-            if (!start.before(end)) {
-                throw new ValidationException(
-                        "Process start time: " + start + " should be before process end time: " + end);
-            }
-        } catch (ValidationException e) {
-            throw new ValidationException(e);
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    private void validateInputPartition(Input input, Feed feed) throws FalconException {
-        if (input.getPartition() == null) {
-            return;
-        }
-
-        final Storage.TYPE baseFeedStorageType = FeedHelper.getStorageType(feed);
-        if (baseFeedStorageType == Storage.TYPE.FILESYSTEM) {
-            CrossEntityValidations.validateInputPartition(input, feed);
-        } else if (baseFeedStorageType == Storage.TYPE.TABLE) {
-            throw new ValidationException("Input partitions are not supported for table storage: " + input.getName());
-        }
-    }
-
-    private void validateDatasetName(Inputs inputs, Outputs outputs) throws ValidationException {
-        Set<String> datasetNames = new HashSet<String>();
-        if (inputs != null) {
-            for (Input input : inputs.getInputs()) {
-                if (!datasetNames.add(input.getName())) {
-                    throw new ValidationException("Input name: " + input.getName() + " is already used");
-                }
-            }
-        }
-
-        if (outputs != null) {
-            for (Output output : outputs.getOutputs()) {
-                if (!datasetNames.add(output.getName())) {
-                    throw new ValidationException("Output name: " + output.getName() + " is already used");
-                }
-            }
-        }
-    }
-
-    private void validateLateInputs(Process process) throws ValidationException {
-        if (process.getLateProcess() == null) {
-            return;
-        }
-
-        Map<String, String> feeds = new HashMap<String, String>();
-        if (process.getInputs() != null) {
-            for (Input in : process.getInputs().getInputs()) {
-                feeds.put(in.getName(), in.getFeed());
-            }
-        }
-
-        for (LateInput lp : process.getLateProcess().getLateInputs()) {
-            if (!feeds.keySet().contains(lp.getInput())) {
-                throw new ValidationException("Late Input: " + lp.getInput() + " is not specified in the inputs");
-            }
-
-            try {
-                Feed feed = ConfigurationStore.get().get(EntityType.FEED, feeds.get(lp.getInput()));
-                if (feed.getLateArrival() == null) {
-                    throw new ValidationException(
-                            "Late Input feed: " + lp.getInput() + " is not configured with late arrival cut-off");
-                }
-            } catch (FalconException e) {
-                throw new ValidationException(e);
-            }
-        }
-    }
-
-    private void validateOptionalInputsForTableStorage(Feed feed, Input input) throws FalconException {
-        if (input.isOptional() && FeedHelper.getStorageType(feed) == Storage.TYPE.TABLE) {
-            throw new ValidationException("Optional Input is not supported for feeds with table storage! "
-                    + input.getName());
-        }
-    }
-
-    /**
-     * Validate ACL if authorization is enabled.
-     *
-     * @param process process entity
-     * @throws ValidationException
-     */
-    protected void validateACL(Process process) throws FalconException {
-        if (isAuthorizationDisabled) {
-            return;
-        }
-
-        // Validate the entity owner is logged-in, authenticated user if authorization is enabled
-        ACL processACL = process.getACL();
-        if (processACL == null) {
-            throw new ValidationException("Process ACL cannot be empty for:  " + process.getName());
-        }
-
-        validateACLOwnerAndGroup(processACL);
-
-        try {
-            authorize(process.getName(), processACL);
-        } catch (AuthorizationException e) {
-            throw new ValidationException(e);
-        }
-    }
-
-    protected void validateProperties(Process process) throws ValidationException {
-        Properties properties = process.getProperties();
-        if (properties == null) {
-            return; // Process has no properties to validate.
-        }
-
-        List<Property> propertyList = process.getProperties().getProperties();
-        HashSet<String> propertyKeys = new HashSet<String>();
-        for (Property prop : propertyList) {
-            if (StringUtils.isBlank(prop.getName())) {
-                throw new ValidationException("Property name and value cannot be empty for Process : "
-                        + process.getName());
-            }
-            if (!propertyKeys.add(prop.getName())) {
-                throw new ValidationException("Multiple properties with same name found for Process : "
-                        + process.getName());
-            }
-        }
-    }
-
-    private void validateHadoopQueue(Process process) throws FalconException {
-        // get queue name specified in the process entity
-        String processQueueName = null;
-        java.util.Properties props = EntityUtil.getEntityProperties(process);
-        if ((props != null) && (props.containsKey(EntityUtil.MR_QUEUE_NAME))) {
-            processQueueName = props.getProperty(EntityUtil.MR_QUEUE_NAME);
-        } else {
-            return;
-        }
-
-        // iterate through each cluster in process entity to check if the cluster has the process entity queue
-        for (org.apache.falcon.entity.v0.process.Cluster cluster : process.getClusters().getClusters()) {
-            String clusterName = cluster.getName();
-            org.apache.falcon.entity.v0.cluster.Cluster clusterEntity =
-                    ConfigurationStore.get().get(EntityType.CLUSTER, clusterName);
-
-            String rmURL = ClusterHelper.getPropertyValue(clusterEntity, "yarn.resourcemanager.webapp.https.address");
-            if (rmURL == null) {
-                rmURL = ClusterHelper.getPropertyValue(clusterEntity, "yarn.resourcemanager.webapp.address");
-            }
-
-            if (rmURL != null) {
-                LOG.info("Fetching hadoop queue names from cluster {} RM URL {}", cluster.getName(), rmURL);
-                Set<String> queueNames = HadoopQueueUtil.getHadoopClusterQueueNames(rmURL);
-
-                if (queueNames.contains(processQueueName)) {
-                    LOG.info("Validated presence of queue {} specified in process "
-                            + "entity for cluster {}", processQueueName, clusterName);
-                } else {
-                    String strMsg = String.format("The hadoop queue name %s specified in process "
-                            + "entity for cluster %s is invalid.", processQueueName, cluster.getName());
-                    LOG.info(strMsg);
-                    throw new FalconException(strMsg);
-                }
-            }
-        }
-    }
-
-}
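
The SLA ordering enforced by validateProcessSLA above can be illustrated with a small standalone sketch: shouldStartIn must not exceed shouldEndIn, nor the process timeout. java.time.Duration stands in for Falcon's Frequency expressions; the class name and values are illustrative assumptions.

import java.time.Duration;

/** Standalone sketch of the ordering checks in validateProcessSLA: shouldStartIn
 *  must not exceed shouldEndIn, and must not exceed the process timeout.
 *  Durations stand in for Falcon Frequency expressions; values are illustrative. */
public final class ProcessSlaCheck {

    static void validate(Duration shouldStartIn, Duration shouldEndIn, Duration timeout) {
        if (shouldStartIn == null) {
            return;                                     // nothing to check
        }
        if (shouldEndIn != null && shouldStartIn.compareTo(shouldEndIn) > 0) {
            throw new IllegalArgumentException("shouldStartIn is greater than shouldEndIn");
        }
        if (timeout != null && timeout.compareTo(shouldStartIn) < 0) {
            throw new IllegalArgumentException("shouldStartIn is greater than timeout");
        }
    }

    public static void main(String[] args) {
        validate(Duration.ofMinutes(30), Duration.ofHours(1), Duration.ofHours(6));   // passes
        try {
            validate(Duration.ofHours(2), Duration.ofHours(1), Duration.ofHours(6));  // rejected
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}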

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/parser/ValidationException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/ValidationException.java b/common/src/main/java/org/apache/falcon/entity/parser/ValidationException.java
deleted file mode 100644
index 98f1cb9..0000000
--- a/common/src/main/java/org/apache/falcon/entity/parser/ValidationException.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.FalconException;
-
-/**
- * ValidationException during parsing.
- */
-public class ValidationException extends FalconException {
-
-    public ValidationException(String message) {
-        super(message);
-    }
-
-    public ValidationException(Exception e) {
-        super(e);
-    }
-
-    public ValidationException(String message, Exception e) {
-        super(message, e);
-    }
-
-    private static final long serialVersionUID = -4502166408759507355L;
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java b/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
deleted file mode 100644
index 9c7a932..0000000
--- a/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
+++ /dev/null
@@ -1,435 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.commons.codec.CharEncoding;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.service.ConfigurationChangeListener;
-import org.apache.falcon.service.FalconService;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URLDecoder;
-import java.net.URLEncoder;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Persistent store for falcon entities.
- */
-public final class ConfigurationStore implements FalconService {
-
-    private static final EntityType[] ENTITY_LOAD_ORDER = new EntityType[] {
-        EntityType.CLUSTER, EntityType.FEED, EntityType.PROCESS, EntityType.DATASOURCE, };
-    public static final EntityType[] ENTITY_DELETE_ORDER = new EntityType[] { EntityType.PROCESS, EntityType.FEED,
-        EntityType.CLUSTER, };
-
-    private static final Logger LOG = LoggerFactory.getLogger(ConfigurationStore.class);
-    private static final Logger AUDIT = LoggerFactory.getLogger("AUDIT");
-    private static final String UTF_8 = CharEncoding.UTF_8;
-    private final boolean shouldPersist;
-
-    private static final FsPermission STORE_PERMISSION =
-            new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
-
-    private Set<ConfigurationChangeListener> listeners = new LinkedHashSet<ConfigurationChangeListener>();
-
-    private ThreadLocal<Entity> updatesInProgress = new ThreadLocal<Entity>();
-
-    private final Map<EntityType, ConcurrentHashMap<String, Entity>> dictionary
-        = new HashMap<EntityType, ConcurrentHashMap<String, Entity>>();
-
-    private static final Entity NULL = new Entity() {
-        @Override
-        public String getName() {
-            return "NULL";
-        }
-
-        @Override
-        public String getTags() { return null; }
-
-        @Override
-        public AccessControlList getACL() {
-            return null;
-        }
-    };
-
-    private static final ConfigurationStore STORE = new ConfigurationStore();
-
-    public static ConfigurationStore get() {
-        return STORE;
-    }
-
-    private FileSystem fs;
-    private Path storePath;
-
-    private ConfigurationStore() {
-        for (EntityType type : EntityType.values()) {
-            dictionary.put(type, new ConcurrentHashMap<String, Entity>());
-        }
-
-        shouldPersist = Boolean.parseBoolean(StartupProperties.get().getProperty("config.store.persist", "true"));
-        if (shouldPersist) {
-            String uri = StartupProperties.get().getProperty("config.store.uri");
-            storePath = new Path(uri);
-            fs = initializeFileSystem();
-        }
-    }
-
-    /**
-     * Falcon owns this dir on HDFS which no one has permissions to read.
-     *
-     * @return FileSystem handle
-     */
-    private FileSystem initializeFileSystem() {
-        try {
-            FileSystem fileSystem =
-                    HadoopClientFactory.get().createFalconFileSystem(storePath.toUri());
-            if (!fileSystem.exists(storePath)) {
-                LOG.info("Creating configuration store directory: {}", storePath);
-                // set permissions so config store dir is owned by falcon alone
-                HadoopClientFactory.mkdirs(fileSystem, storePath, STORE_PERMISSION);
-            }
-
-            return fileSystem;
-        } catch (Exception e) {
-            throw new RuntimeException("Unable to bring up config store for path: " + storePath, e);
-        }
-    }
-
-    @Override
-    public void init() throws FalconException {
-        String listenerClassNames = StartupProperties.get().
-                getProperty("configstore.listeners", "org.apache.falcon.entity.v0.EntityGraph");
-        for (String listenerClassName : listenerClassNames.split(",")) {
-            listenerClassName = listenerClassName.trim();
-            if (listenerClassName.isEmpty()) {
-                continue;
-            }
-            ConfigurationChangeListener listener = ReflectionUtils.getInstanceByClassName(listenerClassName);
-            registerListener(listener);
-        }
-
-        if (shouldPersist) {
-            for (final EntityType type : ENTITY_LOAD_ORDER) {
-                loadEntity(type);
-            }
-        }
-    }
-
-    private void loadEntity(final EntityType type) throws FalconException {
-        try {
-            final ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
-            FileStatus[] files = fs.globStatus(new Path(storePath, type.name() + Path.SEPARATOR + "*"));
-            if (files != null) {
-                final ExecutorService service = Executors.newFixedThreadPool(100);
-                for (final FileStatus file : files) {
-                    service.execute(new Runnable() {
-                        @Override
-                        public void run() {
-                            try {
-                                String fileName = file.getPath().getName();
-                                String encodedEntityName = fileName.substring(0, fileName.length() - 4); // drop
-                                // ".xml"
-                                String entityName = URLDecoder.decode(encodedEntityName, UTF_8);
-                                Entity entity = restore(type, entityName);
-                                entityMap.put(entityName, entity);
-                            } catch (IOException | FalconException e) {
-                                LOG.error("Unable to restore entity of", file);
-                            }
-                        }
-                    });
-                }
-                service.shutdown();
-                if (service.awaitTermination(10, TimeUnit.MINUTES)) {
-                    LOG.info("Restored Configurations for entity type: {} ", type.name());
-                } else {
-                    LOG.warn("Time out happened while waiting for all threads to finish while restoring entities "
-                            + "for type: {}", type.name());
-                }
-                // Checking if all entities were loaded
-                if (entityMap.size() != files.length) {
-                    throw new FalconException("Unable to restore configurations for entity type " + type.name());
-                }
-                for (Entity entity : entityMap.values()){
-                    onReload(entity);
-                }
-            }
-        } catch (IOException e) {
-            throw new FalconException("Unable to restore configurations", e);
-        } catch (InterruptedException e) {
-            throw new FalconException("Failed to restore configurations in 10 minutes for entity type " + type.name());
-        }
-    }
-
-    public void registerListener(ConfigurationChangeListener listener) {
-        listeners.add(listener);
-    }
-
-    public void unregisterListener(ConfigurationChangeListener listener) {
-        listeners.remove(listener);
-    }
-
-    /**
-     * @param type   - EntityType that need to be published
-     * @param entity - Reference to the Entity Object
-     * @throws FalconException
-     */
-    public synchronized void publish(EntityType type, Entity entity) throws FalconException {
-        try {
-            if (get(type, entity.getName()) == null) {
-                persist(type, entity);
-                onAdd(entity);
-                dictionary.get(type).put(entity.getName(), entity);
-            } else {
-                throw new EntityAlreadyExistsException(
-                        entity.toShortString() + " already registered with configuration store. "
-                                + "Can't be submitted again. Try removing before submitting.");
-            }
-        } catch (IOException e) {
-            throw new StoreAccessException(e);
-        }
-        AUDIT.info(type + "/" + entity.getName() + " is published into config store");
-    }
-
-    private synchronized void updateInternal(EntityType type, Entity entity) throws FalconException {
-        try {
-            if (get(type, entity.getName()) != null) {
-                persist(type, entity);
-                ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
-                Entity oldEntity = entityMap.get(entity.getName());
-                onChange(oldEntity, entity);
-                entityMap.put(entity.getName(), entity);
-            } else {
-                throw new FalconException(entity.toShortString() + " doesn't exist");
-            }
-        } catch (IOException e) {
-            throw new StoreAccessException(e);
-        }
-        AUDIT.info(type + "/" + entity.getName() + " is replaced into config store");
-    }
-
-    public synchronized void update(EntityType type, Entity entity) throws FalconException {
-        if (updatesInProgress.get() == entity) {
-            updateInternal(type, entity);
-        } else {
-            throw new FalconException(entity.toShortString() + " is not initialized for update");
-        }
-    }
-
-    private void onAdd(Entity entity) throws FalconException {
-        for (ConfigurationChangeListener listener : listeners) {
-            listener.onAdd(entity);
-        }
-    }
-
-    private void onChange(Entity oldEntity, Entity newEntity) throws FalconException {
-        for (ConfigurationChangeListener listener : listeners) {
-            listener.onChange(oldEntity, newEntity);
-        }
-    }
-
-    private void onReload(Entity entity) throws FalconException {
-        for (ConfigurationChangeListener listener : listeners) {
-            listener.onReload(entity);
-        }
-    }
-
-    public synchronized void initiateUpdate(Entity entity) throws FalconException {
-        if (get(entity.getEntityType(), entity.getName()) == null || updatesInProgress.get() != null) {
-            throw new FalconException(
-                    entity.toShortString() + " doesn't exist or an update for it is already in progress");
-        }
-        updatesInProgress.set(entity);
-    }
-
-    /**
-     * @param type - Entity type that is being retrieved
-     * @param name - Name as it appears in the entity xml definition
-     * @param <T>  - Actual Entity object type
-     * @return - Entity object from internal dictionary, If the object is not
-     *         loaded in memory yet, it will retrieve it from persistent store
-     *         just in time. On startup all the entities will be added to the
-     *         dictionary with null reference.
-     * @throws FalconException
-     */
-    @SuppressWarnings("unchecked")
-    public <T extends Entity> T get(EntityType type, String name) throws FalconException {
-        ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
-        if (entityMap.containsKey(name)) {
-            if (updatesInProgress.get() != null && updatesInProgress.get().getEntityType() == type
-                    && updatesInProgress.get().getName().equals(name)) {
-                return (T) updatesInProgress.get();
-            }
-            T entity = (T) entityMap.get(name);
-            if (entity == NULL && shouldPersist) { // Object equality being checked
-                try {
-                    entity = this.restore(type, name);
-                } catch (IOException e) {
-                    throw new StoreAccessException(e);
-                }
-                entityMap.put(name, entity);
-                return entity;
-            } else {
-                return entity;
-            }
-        } else {
-            return null;
-        }
-    }
-
-    public Collection<String> getEntities(EntityType type) {
-        return Collections.unmodifiableCollection(dictionary.get(type).keySet());
-    }
-
-    /**
-     * Remove an entity which is already stored in the config store.
-     *
-     * @param type - Entity type being removed
-     * @param name - Name of the entity object being removed
-     * @return - True if remove is successful, false if the requested entity doesn't
-     *         exist
-     * @throws FalconException
-     */
-    public synchronized boolean remove(EntityType type, String name) throws FalconException {
-        Map<String, Entity> entityMap = dictionary.get(type);
-        if (entityMap.containsKey(name)) {
-            try {
-                archive(type, name);
-                Entity entity = entityMap.get(name);
-                onRemove(entity);
-                entityMap.remove(name);
-            } catch (IOException e) {
-                throw new StoreAccessException(e);
-            }
-            AUDIT.info(type + " " + name + " is removed from config store");
-            return true;
-        }
-        return false;
-    }
-
-    private void onRemove(Entity entity) throws FalconException {
-        for (ConfigurationChangeListener listener : listeners) {
-            listener.onRemove(entity);
-        }
-    }
-
-    /**
-     * @param type   - Entity type that is to be stored into persistent storage
-     * @param entity - entity to persist. JAXB Annotated entity will be marshalled
-     *               to the persistent store. The convention used for storing the
-     *               object:: PROP(config.store.uri)/{entitytype}/{entityname}.xml
-     * @throws java.io.IOException If any error in accessing the storage
-     * @throws FalconException
-     */
-    private void persist(EntityType type, Entity entity) throws IOException, FalconException {
-        if (!shouldPersist) {
-            return;
-        }
-        OutputStream out = fs
-                .create(new Path(storePath,
-                        type + Path.SEPARATOR + URLEncoder.encode(entity.getName(), UTF_8) + ".xml"));
-        try {
-            type.getMarshaller().marshal(entity, out);
-            LOG.info("Persisted configuration {}/{}", type, entity.getName());
-        } catch (JAXBException e) {
-            LOG.error("Unable to serialize the entity object {}/{}", type, entity.getName(), e);
-            throw new StoreAccessException("Unable to serialize the entity object " + type + "/" + entity.getName(), e);
-        } finally {
-            out.close();
-        }
-    }
-
-    /**
-     * Archive removed configuration in the persistent store.
-     *
-     * @param type - Entity type to archive
-     * @param name - name
-     * @throws IOException If any error in accessing the storage
-     */
-    private void archive(EntityType type, String name) throws IOException {
-        if (!shouldPersist) {
-            return;
-        }
-        Path archivePath = new Path(storePath, "archive" + Path.SEPARATOR + type);
-        HadoopClientFactory.mkdirs(fs, archivePath, STORE_PERMISSION);
-        fs.rename(new Path(storePath, type + Path.SEPARATOR + URLEncoder.encode(name, UTF_8) + ".xml"),
-                new Path(archivePath, URLEncoder.encode(name, UTF_8) + "." + System.currentTimeMillis()));
-        LOG.info("Archived configuration {}/{}", type, name);
-    }
-
-    /**
-     * @param type - Entity type to restore from persistent store
-     * @param name - Name of the entity to restore.
-     * @param <T>  - Actual entity object type
-     * @return - De-serialized entity object restored from persistent store
-     * @throws IOException     If any error in accessing the storage
-     * @throws FalconException
-     */
-    @SuppressWarnings("unchecked")
-    private synchronized <T extends Entity> T restore(EntityType type, String name)
-        throws IOException, FalconException {
-
-        InputStream in = fs.open(new Path(storePath, type + Path.SEPARATOR + URLEncoder.encode(name, UTF_8) + ".xml"));
-        try {
-            return (T) type.getUnmarshaller().unmarshal(in);
-        } catch (JAXBException e) {
-            throw new StoreAccessException("Unable to un-marshall xml definition for " + type + "/" + name, e);
-        } finally {
-            in.close();
-            LOG.info("Restored configuration {}/{}", type, name);
-        }
-    }
-
-    public void cleanupUpdateInit() {
-        updatesInProgress.set(null);
-    }
-
-    @Override
-    public String getName() {
-        return this.getClass().getName();
-    }
-
-    @Override
-    public void destroy() {
-    }
-}
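
The on-disk naming convention used by persist(), restore() and archive() above, PROP(config.store.uri)/{entitytype}/{URL-encoded entity name}.xml, can be sketched as follows; the store URI and entity name are made-up values for illustration.

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

/** Standalone sketch of the naming convention used by persist()/restore()/archive():
 *  {config.store.uri}/{ENTITY_TYPE}/{URL-encoded entity name}.xml. The store URI
 *  and entity name below are assumptions made for illustration. */
public final class StorePathExample {
    public static void main(String[] args) throws Exception {
        String storeUri = "hdfs://namenode:8020/apps/falcon/store";   // assumed config.store.uri
        String type = "FEED";
        String name = "raw-clicks feed";                              // illustrative entity name
        String encoded = URLEncoder.encode(name, StandardCharsets.UTF_8.name());
        System.out.println(storeUri + "/" + type + "/" + encoded + ".xml");
        // On delete, the same file is renamed under
        // {config.store.uri}/archive/{ENTITY_TYPE}/{encoded}.{System.currentTimeMillis()}
    }
}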

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/store/EntityAlreadyExistsException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/EntityAlreadyExistsException.java b/common/src/main/java/org/apache/falcon/entity/store/EntityAlreadyExistsException.java
deleted file mode 100644
index 28c5ac0..0000000
--- a/common/src/main/java/org/apache/falcon/entity/store/EntityAlreadyExistsException.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Exception thrown when an entity being added is already present in the config store.
- */
-public class EntityAlreadyExistsException extends FalconException {
-
-    public EntityAlreadyExistsException(Exception e) {
-        super(e);
-    }
-
-    public EntityAlreadyExistsException(String message, Exception e) {
-        super(message, e);
-    }
-
-    public EntityAlreadyExistsException(String message) {
-        super(message);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/store/FeedLocationStore.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/FeedLocationStore.java b/common/src/main/java/org/apache/falcon/entity/store/FeedLocationStore.java
deleted file mode 100644
index a9b7617..0000000
--- a/common/src/main/java/org/apache/falcon/entity/store/FeedLocationStore.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.resource.FeedLookupResult;
-import org.apache.falcon.service.ConfigurationChangeListener;
-import org.apache.falcon.util.DeploymentUtil;
-import org.apache.falcon.util.FalconRadixUtils;
-import org.apache.falcon.util.RadixTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.List;
-
-/**
- *  A <Key, Value> Store that stores FeedProperties against a Feed's Locations.
- *
- * For example:
- * let's say a feed - <b>MyFeed</b>, is configured for two clusters - cluster1 and cluster2 and has data location as
- * below.
- * /projects/myprocess/data/${MONTH}-${DAY}-${HOUR}
- * /projects/myprocess/meta/${MONTH}-${DAY}-${HOUR}
- *
- * then the key,value store will be like below
- * key1: /projects/myprocess/data/${MONTH}-${DAY}-${HOUR}
- * value1: [FeedProperties("cluster1", LocationType.DATA, "MyFeed"),
- *          FeedProperties("cluster2", LocationType.DATA, "MyFeed")
- *         ]
- *
- * key2: /projects/myprocess/meta/${MONTH}-${DAY}-${HOUR}
- * value2: [FeedProperties("cluster1", LocationType.META, "MyFeed"),
- *          FeedProperties("cluster2", LocationType.META, "MyFeed")
- *         ]
- *
- * It ensures that no two Feeds share the same location.
- * It can also be used for operations like:
- * <ul>
- *     <li>Find whether there is a feed which uses a given path as its location.</li>
- *     <li>Find the name of a feed, given its location.</li>
- * </ul>
- */
-public final class FeedLocationStore implements ConfigurationChangeListener {
-
-    private static final Logger LOG = LoggerFactory.getLogger(FeedLocationStore.class);
-    protected final FeedPathStore<FeedLookupResult.FeedProperties> store = new
-            RadixTree<FeedLookupResult.FeedProperties>();
-
-    private static FeedLocationStore instance = new FeedLocationStore();
-
-    private FeedLocationStore(){
-    }
-
-    public static FeedLocationStore get(){
-        return instance;
-    }
-
-    @Override
-    public void onAdd(Entity entity) throws FalconException {
-        if (entity.getEntityType() == EntityType.FEED){
-            Feed feed = (Feed) entity;
-            List<Cluster> clusters = feed.getClusters().getClusters();
-            for(Cluster cluster: clusters) {
-                if (DeploymentUtil.getCurrentClusters().contains(cluster.getName())) {
-                    List<Location> clusterSpecificLocations = FeedHelper.getLocations(FeedHelper.getCluster(feed,
-                            cluster.getName()), feed);
-                    if (clusterSpecificLocations != null) {
-                        for (Location location : clusterSpecificLocations) {
-                            if (location != null && StringUtils.isNotBlank(location.getPath())) {
-                                FeedLookupResult.FeedProperties value = new FeedLookupResult.FeedProperties(
-                                        feed.getName(), location.getType(), cluster.getName());
-                                store.insert(StringUtils.trim(location.getPath()), value);
-                                LOG.debug("Inserted location: {} for feed: {} and cluster: {}",
-                                        location.getPath(), feed.getName(), cluster.getName());
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    /**
-     * Delete the key(path) from the store if the feed is deleted.
-     * @param entity entity object
-     * @throws FalconException
-     */
-    @Override
-    public void onRemove(Entity entity) throws FalconException {
-        if (entity.getEntityType() == EntityType.FEED){
-
-            Feed feed = (Feed) entity;
-            List<Cluster> clusters = feed.getClusters().getClusters();
-            for(Cluster cluster: clusters){
-                List<Location> clusterSpecificLocations = FeedHelper.getLocations(FeedHelper.getCluster(feed,
-                        cluster.getName()), feed);
-                if (clusterSpecificLocations != null) {
-                    for(Location location: clusterSpecificLocations){
-                        if (location != null && StringUtils.isNotBlank(location.getPath())){
-                            FeedLookupResult.FeedProperties value = new FeedLookupResult.FeedProperties(feed.getName(),
-                                    location.getType(), cluster.getName());
-                            LOG.debug("Delete called for location: {} for feed: {} and cluster: {}",
-                                    location.getPath(), feed.getName(), cluster.getName());
-                            store.delete(location.getPath(), value);
-                            LOG.debug("Deleted location: {} for feed: {} and cluster: {}",
-                                    location.getPath(), feed.getName(), cluster.getName());
-                        }
-                    }
-                }
-            }
-        }
-
-    }
-
-    /**
-     * Delete the old path and insert the new Path when the feed is updated.
-     * @param oldEntity old entity object
-     * @param newEntity updated entity object
-     * @throws FalconException if the new path already exists in the store.
-     */
-    @Override
-    public void onChange(Entity oldEntity, Entity newEntity) throws FalconException {
-        onRemove(oldEntity);
-        onAdd(newEntity);
-    }
-
-    @Override
-    public void onReload(Entity entity) throws FalconException {
-        onAdd(entity);
-    }
-
-
-    public Collection<FeedLookupResult.FeedProperties> reverseLookup(String path) {
-        return store.find(path, new FalconRadixUtils.FeedRegexAlgorithm());
-    }
-}
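
The index FeedLocationStore maintains, each feed location path mapped to the (feed, location type, cluster) entries registered against it, can be sketched with a plain HashMap standing in for Falcon's RadixTree; all names below are illustrative.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Standalone sketch of the index FeedLocationStore maintains: each feed location
 *  path maps to the (feed, location type, cluster) entries registered against it.
 *  A HashMap stands in for Falcon's RadixTree; all names are illustrative. */
public final class FeedLocationIndexSketch {

    private static final Map<String, List<String>> INDEX = new HashMap<>();

    static void insert(String path, String feed, String locationType, String cluster) {
        INDEX.computeIfAbsent(path.trim(), k -> new ArrayList<String>())
             .add(feed + "/" + locationType + "/" + cluster);
    }

    public static void main(String[] args) {
        String dataPath = "/projects/myprocess/data/${MONTH}-${DAY}-${HOUR}";
        insert(dataPath, "MyFeed", "DATA", "cluster1");
        insert(dataPath, "MyFeed", "DATA", "cluster2");
        // Reverse lookup: which feed/cluster pairs use this location?
        System.out.println(INDEX.get(dataPath));
        // -> [MyFeed/DATA/cluster1, MyFeed/DATA/cluster2]
    }
}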

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/store/FeedPathStore.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/FeedPathStore.java b/common/src/main/java/org/apache/falcon/entity/store/FeedPathStore.java
deleted file mode 100644
index 1be12fe..0000000
--- a/common/src/main/java/org/apache/falcon/entity/store/FeedPathStore.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.falcon.util.FalconRadixUtils;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.util.Collection;
-
-/**
- * A <Key, Value> Store to store values against Feed Locations.
- *
- * @param <T>
- */
-public interface FeedPathStore<T> {
-
-    void insert(@Nullable String key, @Nonnull T value);
-
-    int getSize();
-
-    @Nullable
-    Collection<T> find(@Nonnull String key, @Nonnull FalconRadixUtils.INodeAlgorithm algorithm);
-
-    @Nullable
-    Collection<T> find(@Nonnull String key);
-
-    boolean delete(@Nonnull String key, @Nonnull T value);
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/store/StoreAccessException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/StoreAccessException.java b/common/src/main/java/org/apache/falcon/entity/store/StoreAccessException.java
deleted file mode 100644
index 318dc2e..0000000
--- a/common/src/main/java/org/apache/falcon/entity/store/StoreAccessException.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.store;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Exception thrown when there is an issue accessing the persistent store.
- */
-public class StoreAccessException extends FalconException {
-
-    /**
-     * @param message error message
-     * @param e Exception
-     */
-    public StoreAccessException(String message, Exception e) {
-        super(message, e);
-    }
-
-    public StoreAccessException(Exception e) {
-        super(e);
-    }
-}


[14/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/GraphAssert.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/GraphAssert.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/GraphAssert.java
deleted file mode 100644
index 499cab9..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/GraphAssert.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.response.lineage.Edge;
-import org.apache.falcon.regression.core.response.lineage.EdgesResult;
-import org.apache.falcon.regression.core.response.lineage.GraphResult;
-import org.apache.falcon.regression.core.response.lineage.NODE_TYPE;
-import org.apache.falcon.regression.core.response.lineage.Vertex;
-import org.apache.falcon.regression.core.response.lineage.VerticesResult;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-
-/**
- * util methods for Graph Asserts.
- */
-public final class GraphAssert {
-    private GraphAssert() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(GraphAssert.class);
-
-    /**
-     * Check that the result has certain minimum number of vertices.
-     * @param graphResult the result to be checked
-     * @param minNumOfVertices required number of vertices
-     */
-    public static void checkVerticesPresence(final GraphResult graphResult,
-                                             final int minNumOfVertices) {
-        Assert.assertTrue(graphResult.getTotalSize() >= minNumOfVertices,
-            "graphResult should have at least " + minNumOfVertices + " vertex");
-    }
-
-    /**
-     * Check that the vertices in the result are sane.
-     * @param verticesResult the result to be checked
-     */
-    public static void assertVertexSanity(final VerticesResult verticesResult) {
-        Assert.assertEquals(verticesResult.getResults().size(), verticesResult.getTotalSize(),
-            "Size of vertices don't match");
-        for (Vertex vertex : verticesResult.getResults()) {
-            Assert.assertNotNull(vertex.getId(),
-                "id of the vertex should be non-null: " + vertex);
-            Assert.assertEquals(vertex.getNodeType(), NODE_TYPE.VERTEX,
-                "_type of the vertex should be non-null: " + vertex);
-            Assert.assertNotNull(vertex.getName(),
-                "name of the vertex should be non-null: " + vertex);
-            Assert.assertNotNull(vertex.getType(),
-                "type of the vertex should be non-null: " + vertex);
-            Assert.assertNotNull(vertex.getTimestamp(),
-                "timestamp of the vertex should be non-null: " + vertex);
-        }
-    }
-
-    /**
-     * Check that edges in the result are sane.
-     * @param edgesResult result to be checked
-     */
-    public static void assertEdgeSanity(final EdgesResult edgesResult) {
-        Assert.assertEquals(edgesResult.getResults().size(), edgesResult.getTotalSize(),
-            "Size of edges don't match");
-        for (Edge edge : edgesResult.getResults()) {
-            assertEdgeSanity(edge);
-        }
-    }
-
-    /**
-     * Check that edge is sane.
-     * @param edge edge to be checked
-     */
-    public static void assertEdgeSanity(Edge edge) {
-        Assert.assertNotNull(edge.getId(), "id of an edge can't be null: " + edge);
-        Assert.assertEquals(edge.getNodeType(), NODE_TYPE.EDGE,
-            "_type of an edge can't be null: " + edge);
-        Assert.assertNotNull(edge.getLabel(), "_label of an edge can't be null: " + edge);
-        Assert.assertTrue(edge.getInV() > 0, "_inV of an edge must be positive: " + edge);
-        Assert.assertTrue(edge.getOutV() > 0, "_outV of an edge must be positive: " + edge);
-    }
-
-    /**
-     * Check that user vertex is present.
-     * @param verticesResult the result to be checked
-     */
-    public static void assertUserVertexPresence(final VerticesResult verticesResult) {
-        checkVerticesPresence(verticesResult, 1);
-        for(Vertex vertex : verticesResult.getResults()) {
-            if (vertex.getType() == Vertex.VERTEX_TYPE.USER
-                    && vertex.getName().equals(MerlinConstants.CURRENT_USER_NAME)) {
-                return;
-            }
-        }
-        Assert.fail(String.format("Vertex corresponding to user: %s is not present.",
-            MerlinConstants.CURRENT_USER_NAME));
-    }
-
-    /**
-     * Check that a vertex of a certain name is present.
-     * @param verticesResult the result to be checked
-     * @param name expected name
-     */
-    public static void assertVertexPresence(final VerticesResult verticesResult, final String name) {
-        checkVerticesPresence(verticesResult, 1);
-        for (Vertex vertex : verticesResult.getResults()) {
-            if (vertex.getName().equals(name)) {
-                return;
-            }
-        }
-        Assert.fail(String.format("Vertex of name: %s is not present.", name));
-    }
-
-    /**
-     * Check that the result has at least a certain number of vertices of a certain type.
-     * @param verticesResult the result to be checked
-     * @param vertexType vertex type
-     * @param minOccurrence required number of vertices
-     */
-    public static void assertVerticesPresenceMinOccur(final VerticesResult verticesResult,
-                                                      final Vertex.VERTEX_TYPE vertexType,
-                                                      final int minOccurrence) {
-        int occurrence = 0;
-        for(Vertex vertex : verticesResult.getResults()) {
-            if (vertex.getType() == vertexType) {
-                LOGGER.info("Found vertex: " + vertex);
-                occurrence++;
-                if (occurrence >= minOccurrence) {
-                    return;
-                }
-            }
-        }
-        Assert.fail(String.format("Expected at least %d vertices of type %s. But found only %d",
-            minOccurrence, vertexType, occurrence));
-    }
-
-    /**
-     * Check result to contain at least a certain number of edges of a certain type.
-     * @param edgesResult result to be checked
-     * @param edgeLabel edge label
-     * @param minOccurrence required number of edges
-     */
-    public static void assertEdgePresenceMinOccur(final EdgesResult edgesResult,
-                                                  final Edge.LabelType edgeLabel,
-                                                  final int minOccurrence) {
-        int occurrence = 0;
-        for(Edge edge : edgesResult.getResults()) {
-            if (edge.getLabel() == edgeLabel) {
-                LOGGER.info("Found edge: " + edge);
-                occurrence++;
-                if (occurrence >= minOccurrence) {
-                    return;
-                }
-            }
-        }
-        Assert.fail(String.format("Expected at least %d vertices of type %s. But found only %d",
-            minOccurrence, edgeLabel, occurrence));
-    }
-}
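
For reference, a minimal usage sketch of the lineage-graph assertions above, written as if it
lived alongside those static helpers (so no class qualifier is needed). How the VerticesResult
and EdgesResult are obtained from the lineage API is outside this excerpt and is assumed here.

    /**
     * Usage sketch (assumed to sit in the same utility class as the helpers above):
     * run the generic sanity checks on already-fetched lineage results and make sure
     * the current user's vertex is present.
     */
    public static void verifyLineageResults(VerticesResult verticesResult, EdgesResult edgesResult) {
        assertVertexSanity(verticesResult);       // every vertex has id, name, type and timestamp
        assertEdgeSanity(edgesResult);            // every edge has id, label and valid endpoints
        assertUserVertexPresence(verticesResult); // a vertex for the current user exists
        assertVerticesPresenceMinOccur(verticesResult, Vertex.VERTEX_TYPE.USER, 1);
    }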

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HCatUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HCatUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HCatUtil.java
deleted file mode 100644
index 1b463cd..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HCatUtil.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
-import org.apache.hive.hcatalog.common.HCatException;
-import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
-import org.testng.Assert;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Util methods for HCat.
- */
-public final class HCatUtil {
-    private HCatUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    public static HCatClient getHCatClient(String hCatEndPoint, String hiveMetaStorePrinciple)
-        throws HCatException {
-        HiveConf hcatConf = new HiveConf();
-        hcatConf.set("hive.metastore.local", "false");
-        hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, hCatEndPoint);
-        hcatConf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, hiveMetaStorePrinciple);
-        hcatConf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, MerlinConstants.IS_SECURE);
-        hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-        hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-            HCatSemanticAnalyzer.class.getName());
-        hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-        hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-        hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-        return HCatClient.create(hcatConf);
-    }
-
-    public static void addPartitionsToTable(HCatClient clusterHC, List<String> partitions,
-        List<String> partitionLocations, String partitionCol, String dbName, String tableName) throws HCatException {
-        Assert.assertEquals(partitions.size(), partitionLocations.size(),
-                "Number of locations is not same as number of partitions.");
-        final List<HCatAddPartitionDesc> partitionDesc = new ArrayList<>();
-        for (int i = 0; i < partitions.size(); ++i) {
-            final String partition = partitions.get(i);
-            final Map<String, String> onePartition = new HashMap<>();
-            onePartition.put(partitionCol, partition);
-            final String partitionLoc = partitionLocations.get(i);
-            partitionDesc.add(HCatAddPartitionDesc.create(dbName, tableName, partitionLoc, onePartition).build());
-        }
-        clusterHC.addPartitions(partitionDesc);
-    }
-
-    @SuppressWarnings("deprecation")
-    public static HCatFieldSchema getStringSchema(String fieldName, String comment) throws HCatException {
-        return new HCatFieldSchema(fieldName, HCatFieldSchema.Type.STRING, comment);
-    }
-}
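
For illustration, a minimal sketch of how HCatUtil might be driven from a test. The metastore
endpoint, Kerberos principal, database, table and partition values below are placeholders, not
values taken from the deleted code.

    import java.util.Arrays;

    import org.apache.falcon.regression.core.util.HCatUtil;
    import org.apache.hive.hcatalog.api.HCatClient;

    public class HCatUtilUsageSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder endpoint and principal; in the suite these come from cluster properties.
            HCatClient client = HCatUtil.getHCatClient(
                "thrift://metastore-host:9083", "hive/_HOST@EXAMPLE.COM");
            // Register two partitions of a 'dt'-partitioned table, one HDFS location per value.
            HCatUtil.addPartitionsToTable(client,
                Arrays.asList("2016-03-01", "2016-03-02"),
                Arrays.asList("/data/in/2016-03-01", "/data/in/2016-03-02"),
                "dt", "default", "sample_table");
        }
    }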

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HadoopUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HadoopUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HadoopUtil.java
deleted file mode 100644
index a3b059e..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HadoopUtil.java
+++ /dev/null
@@ -1,569 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.log4j.Logger;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.regex.Pattern;
-
-/**
- * Util methods related to Hadoop.
- */
-public final class HadoopUtil {
-
-    public static final String SOMETHING_RANDOM = "somethingRandom";
-    private static final Logger LOGGER = Logger.getLogger(HadoopUtil.class);
-    private static Pattern protocol = Pattern.compile(":[\\d]+/");
-
-    private HadoopUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    /*
-     * Removes 'hdfs(hftp)://server:port'
-     */
-    public static String cutProtocol(String path) {
-        if (StringUtils.isNotEmpty(path)) {
-            if (protocol.matcher(path).find()) {
-                return '/' + protocol.split(path)[1];
-            }
-        }
-        return path;
-    }
-
-    public static String joinPath(String basePath, String... restParts) {
-        final String separator = "/";
-        List<String> cleanParts = new ArrayList<>();
-        String cleanBasePath = basePath.replaceFirst(separator + "$", "");
-        cleanParts.add(cleanBasePath);
-        for (String onePart : restParts) {
-            final String cleanPart = onePart.replaceFirst("^" + separator, "").replaceFirst(separator + "$", "");
-            cleanParts.add(cleanPart);
-        }
-        return StringUtils.join(cleanParts, separator);
-    }
-
-    /**
-     * Retrieves all file names contained in a given directory.
-     * @param fs filesystem
-     * @param location given directory
-     * @return list of file names
-     * @throws IOException
-     */
-    public static List<String> getAllFilesHDFS(FileSystem fs, Path location) throws IOException {
-        List<String> files = new ArrayList<>();
-        if (!fs.exists(location)) {
-            return files;
-        }
-        FileStatus[] stats = fs.listStatus(location);
-        for (FileStatus stat : stats) {
-            if (!isDir(stat)) {
-                files.add(stat.getPath().toString());
-            }
-        }
-        return files;
-    }
-
-    /**
-     * Retrieves all directories within a given depth starting from a specific dir.
-     * @param fs filesystem
-     * @param location given dir
-     * @param depth depth
-     * @return all matching directories
-     * @throws IOException
-     */
-    public static List<Path> getAllDirsRecursivelyHDFS(
-        FileSystem fs, Path location, int depth) throws IOException {
-        List<Path> returnList = new ArrayList<>();
-        FileStatus[] stats = fs.listStatus(location);
-        for (FileStatus stat : stats) {
-            if (isDir(stat)) {
-                returnList.add(stat.getPath());
-                if (depth > 0) {
-                    returnList.addAll(getAllDirsRecursivelyHDFS(fs, stat.getPath(), depth - 1));
-                }
-            }
-        }
-        return returnList;
-    }
-
-    /**
-     * Recursively retrieves all data file names from a given location.
-     * @param fs filesystem
-     * @param location given location
-     * @return list of all files
-     * @throws IOException
-     */
-    public static List<Path> getAllFilesRecursivelyHDFS(
-        FileSystem fs, Path location) throws IOException {
-        List<Path> returnList = new ArrayList<>();
-        RemoteIterator<LocatedFileStatus> remoteIterator;
-        try {
-            remoteIterator = fs.listFiles(location, true);
-        } catch (FileNotFoundException e) {
-            LOGGER.info("Path '" + location + "' is not found on " + fs.getUri());
-            return returnList;
-        }
-        while(remoteIterator.hasNext()) {
-            Path path = remoteIterator.next().getPath();
-            if (!path.toUri().toString().contains("_SUCCESS")) {
-                returnList.add(path);
-            }
-        }
-        return returnList;
-    }
-
-    /**
-     * Checks whether a given location contains the availability flag file.
-     * If availabilityFlag is empty then it looks for a _SUCCESS file (the default).
-     * @param fs filesystem
-     * @param location given location
-     * @param availabilityFlag value of availability flag set in entity
-     * @return true if the availability flag file is present in the given location
-     * @throws IOException
-     */
-    public static boolean getSuccessFolder(
-            FileSystem fs, Path location, String availabilityFlag) throws IOException {
-        LOGGER.info("location : " + location);
-        for(FileStatus stat : fs.listStatus(location)) {
-            if (availabilityFlag.isEmpty()) {
-                if (stat.getPath().getName().equals("_SUCCESS")) {
-                    return true;
-                }
-            } else {
-                if (stat.getPath().getName().equals(availabilityFlag)) {
-                    return true;
-                }
-            }
-        }
-        return false;
-    }
-
-    @SuppressWarnings("deprecation")
-    private static boolean isDir(FileStatus stat) {
-        return stat.isDir();
-    }
-
-    /**
-     * Copies a file from a local location to an HDFS location.
-     * @param fs target filesystem
-     * @param dstHdfsDir destination
-     * @param srcFileLocation source location
-     * @throws IOException
-     */
-    public static void copyDataToFolder(final FileSystem fs, String dstHdfsDir,
-                                        final String srcFileLocation)
-        throws IOException {
-        LOGGER.info(String.format("Copying local dir %s to hdfs location %s on %s",
-            srcFileLocation, dstHdfsDir, fs.getUri()));
-        fs.copyFromLocalFile(new Path(srcFileLocation), new Path(cutProtocol(dstHdfsDir)));
-    }
-
-    /**
-     * Copies a whole directory to hdfs.
-     * @param fs target filesystem
-     * @param dstHdfsDir destination dir
-     * @param localLocation source location
-     * @throws IOException
-     */
-    public static void uploadDir(final FileSystem fs, final String dstHdfsDir,
-                                 final String localLocation)
-        throws IOException {
-        LOGGER.info(String.format("Uploading local dir %s to hdfs location %s", localLocation,
-            dstHdfsDir));
-        HadoopUtil.deleteDirIfExists(dstHdfsDir, fs);
-        HadoopUtil.copyDataToFolder(fs, dstHdfsDir, localLocation);
-    }
-
-    /**
-     * Writes given data to an HDFS location.
-     * @param fs target filesystem
-     * @param dstHdfsDir destination dir
-     * @param data data to be written
-     * @param overwrite whether to overwrite existing data
-     * @throws IOException
-     */
-    public static void writeDataForHive(final FileSystem fs, final String dstHdfsDir,
-        final CharSequence data, boolean overwrite) throws IOException {
-        LOGGER.info(String.format("Writing data %s to hdfs location %s", data, dstHdfsDir));
-        final File tempFile = File.createTempFile(UUID.randomUUID().toString().split("-")[0], ".dat");
-        FileUtils.write(tempFile, data);
-        if (overwrite) {
-            HadoopUtil.deleteDirIfExists(dstHdfsDir, fs);
-        }
-        try {
-            fs.mkdirs(new Path(dstHdfsDir));
-        } catch (Exception e) {
-            //ignore
-        }
-        fs.setPermission(new Path(dstHdfsDir), FsPermission.getDirDefault());
-        HadoopUtil.copyDataToFolder(fs, dstHdfsDir, tempFile.getAbsolutePath());
-        if (!tempFile.delete()) {
-            LOGGER.warn("Deletion of " + tempFile + " failed.");
-        }
-    }
-
-    /**
-     * Lists names of given directory subfolders.
-     * @param fs filesystem
-     * @param baseDir given directory
-     * @return list of subfolders
-     * @throws IOException
-     */
-    public static List<String> getHDFSSubFoldersName(FileSystem fs,
-                                                     String baseDir) throws IOException {
-        List<String> returnList = new ArrayList<>();
-        FileStatus[] stats = fs.listStatus(new Path(baseDir));
-        for (FileStatus stat : stats) {
-            if (isDir(stat)) {
-                returnList.add(stat.getPath().getName());
-            }
-        }
-        return returnList;
-    }
-
-    /**
-     * Checks if file is present in given directory.
-     * @param fs filesystem
-     * @param hdfsPath path to a given directory
-     * @param fileToCheckFor file
-     * @return true if the file is present, false otherwise
-     * @throws IOException
-     */
-    public static boolean isFilePresentHDFS(FileSystem fs, String hdfsPath, String fileToCheckFor)
-        throws IOException {
-        LOGGER.info("getting file from folder: " + hdfsPath);
-        List<String> fileNames = getAllFileNamesFromHDFS(fs, hdfsPath);
-        for (String filePath : fileNames) {
-            if (filePath.contains(fileToCheckFor)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Lists all file names for a given directory.
-     * @param fs filesystem
-     * @param hdfsPath path to a given directory
-     * @return list of files which given directory contains
-     * @throws IOException
-     */
-    private static List<String> getAllFileNamesFromHDFS(
-        FileSystem fs, String hdfsPath) throws IOException {
-        List<String> returnList = new ArrayList<>();
-        LOGGER.info("getting file from folder: " + hdfsPath);
-        FileStatus[] stats = fs.listStatus(new Path(hdfsPath));
-        for (FileStatus stat : stats) {
-            String currentPath = stat.getPath().toUri().getPath(); // path without scheme and authority
-            if (!isDir(stat)) {
-                returnList.add(currentPath);
-            }
-        }
-        return returnList;
-    }
-
-    /**
-     * Removes a directory with a given name and creates an empty one with the same name.
-     * @param fs filesystem
-     * @param path path to a directory
-     * @throws IOException
-     */
-    public static void recreateDir(FileSystem fs, String path) throws IOException {
-        deleteDirIfExists(path, fs);
-        LOGGER.info("creating hdfs dir: " + path + " on " + fs.getConf().get("fs.default.name"));
-        fs.mkdirs(new Path(path));
-    }
-
-    /**
-     * Recreates dirs for a list of filesystems.
-     * @param fileSystems list of filesystems
-     * @param path path to a directory
-     * @throws IOException
-     */
-    public static void recreateDir(List<FileSystem> fileSystems, String path) throws IOException {
-        for (FileSystem fs : fileSystems) {
-            recreateDir(fs, path);
-        }
-    }
-
-    /**
-     * Removes given directory from a filesystem.
-     * @param hdfsPath path to a given directory
-     * @param fs filesystem
-     * @throws IOException
-     */
-    public static void deleteDirIfExists(String hdfsPath, FileSystem fs) throws IOException {
-        Path path = new Path(hdfsPath);
-        if (fs.exists(path)) {
-            LOGGER.info(String.format("Deleting HDFS path: %s on %s", path, fs.getUri()));
-            fs.delete(path, true);
-        } else {
-            LOGGER.info(String.format(
-                "Not deleting non-existing HDFS path: %s on %s", path, fs.getUri()));
-        }
-    }
-
-    /**
-     * Copies data into folders without a path prefix.
-     * @param fs filesystem
-     * @param inputPath source location
-     * @param remoteLocations destination location
-     * @throws IOException
-     */
-    public static void flattenAndPutDataInFolder(FileSystem fs, String inputPath,
-                                                 List<String> remoteLocations) throws IOException {
-        flattenAndPutDataInFolder(fs, inputPath, "", remoteLocations);
-    }
-
-    /**
-     * Copies files from a source directory to target directories on hdfs.
-     * @param fs target filesystem
-     * @param inputPath source location
-     * @param remotePathPrefix prefix for target directories
-     * @param remoteLocations target directories
-     * @return list of exact locations where data was copied
-     * @throws IOException
-     */
-    public static List<String> flattenAndPutDataInFolder(FileSystem fs, String inputPath,
-                                                 String remotePathPrefix,
-                                                 List<String> remoteLocations) throws IOException {
-        if (StringUtils.isNotEmpty(remotePathPrefix)) {
-            deleteDirIfExists(remotePathPrefix, fs);
-        }
-        LOGGER.info("Creating data in folders: \n" + remoteLocations);
-        File input = new File(inputPath);
-        File[] files = input.isDirectory() ? input.listFiles() : new File[]{input};
-        List<Path> filePaths = new ArrayList<>();
-        assert files != null;
-        for (final File file : files) {
-            if (!file.isDirectory()) {
-                final Path filePath = new Path(file.getAbsolutePath());
-                filePaths.add(filePath);
-            }
-        }
-        if (!remotePathPrefix.endsWith("/") && !remoteLocations.get(0).startsWith("/")) {
-            remotePathPrefix += "/";
-        }
-        List<String> locations = new ArrayList<>();
-        for (String remoteDir : remoteLocations) {
-            String remoteLocation = remotePathPrefix + remoteDir;
-            remoteLocation = cutProtocol(remoteLocation);
-            locations.add(remoteLocation);
-            LOGGER.info(String.format("copying to: %s files: %s",
-                fs.getUri() + remoteLocation, Arrays.toString(files)));
-            if (!fs.exists(new Path(remoteLocation))) {
-                fs.mkdirs(new Path(remoteLocation));
-            }
-            fs.copyFromLocalFile(false, true, filePaths.toArray(new Path[filePaths.size()]),
-                new Path(remoteLocation));
-        }
-        return locations;
-    }
-
-    /**
-     * Copies data from local sources to remote directories.
-     * @param fs target filesystem
-     * @param folderPrefix prefix for remote directories
-     * @param folderList remote directories
-     * @param fileLocations sources
-     * @throws IOException
-     */
-    public static void copyDataToFolders(FileSystem fs, final String folderPrefix,
-        List<String> folderList, String... fileLocations) throws IOException {
-        for (final String folder : folderList) {
-            String folderSpace = folder.replaceAll("/", "_");
-            File file = new File(OSUtil.NORMAL_INPUT + folderSpace + ".txt");
-            FileUtils.writeStringToFile(file, "folder", true);
-            fs.copyFromLocalFile(new Path(file.getAbsolutePath()), new Path(folderPrefix + folder));
-            if (!file.delete()) {
-                LOGGER.info("delete was not successful for file: " + file);
-            }
-            Path[] srcPaths = new Path[fileLocations.length];
-            for (int i = 0; i < srcPaths.length; ++i) {
-                srcPaths[i] = new Path(fileLocations[i]);
-            }
-            LOGGER.info(String.format("copying  %s to %s%s on %s", Arrays.toString(srcPaths),
-                folderPrefix, folder, fs.getUri()));
-            fs.copyFromLocalFile(false, true, srcPaths, new Path(folderPrefix + folder));
-        }
-    }
-
-    /**
-     * Uploads data to remote directories with names within date ranges.
-     * @param fs target filesystem
-     * @param interval date ranges before and after the current date
-     * @param minuteSkip time to skip within a range to get intermediate directories
-     * @param folderPrefix prefix for remote directories
-     * @throws IOException
-     */
-    public static void lateDataReplenish(FileSystem fs, int interval,
-        int minuteSkip, String folderPrefix) throws IOException {
-        List<String> folderData = TimeUtil.getMinuteDatesOnEitherSide(interval, minuteSkip);
-        folderData.add(SOMETHING_RANDOM);
-        flattenAndPutDataInFolder(fs, OSUtil.NORMAL_INPUT, folderPrefix, folderData);
-    }
-
-    /**
-     * Creates list of folders on remote filesystem.
-     * @param fs remote filesystem
-     * @param folderPrefix prefix for remote directories
-     * @param folderList list of folders
-     * @throws IOException
-     */
-    public static void createFolders(FileSystem fs, final String folderPrefix,
-                                             List<String> folderList) throws IOException {
-        for (final String folder : folderList) {
-            final String pathString = cutProtocol(folderPrefix + folder);
-            LOGGER.info("Creating " + fs.getUri() + "/" + pathString);
-            fs.mkdirs(new Path(pathString));
-        }
-    }
-
-    /**
-     * Creates folders in a remote location according to the current time and copies files there.
-     * @param fs target filesystem
-     * @param remoteLocation remote location
-     * @param localLocation source
-     * @throws IOException
-     */
-    public static void injectMoreData(FileSystem fs, final String remoteLocation,
-                                      String localLocation) throws IOException {
-        File[] files = new File(localLocation).listFiles();
-        assert files != null;
-        for (final File file : files) {
-            if (!file.isDirectory()) {
-                String path = remoteLocation + "/" + System.currentTimeMillis() / 1000 + "/";
-                LOGGER.info("inserting data@ " + path);
-                fs.copyFromLocalFile(new Path(file.getAbsolutePath()), new Path(path));
-            }
-        }
-
-    }
-
-    /**
-     * Uploads either _SUCCESS or dataFile4.txt file to remote directories with names within date
-     * ranges.
-     * @param fs target filesystem
-     * @param interval date ranges before and after the current date
-     * @param minuteSkip time to skip within a range to get intermediate directories
-     * @param folderPrefix prefix for remote directories
-     * @param fileToBePut what file to copy to remote locations
-     * @throws IOException
-     */
-    public static void putFileInFolderHDFS(FileSystem fs, int interval, int minuteSkip,
-                                           String folderPrefix, String fileToBePut)
-        throws IOException {
-        List<String> folderPaths = TimeUtil.getMinuteDatesOnEitherSide(interval, minuteSkip);
-        LOGGER.info("folderData: " + folderPaths.toString());
-        createFolders(fs, folderPrefix, folderPaths);
-        if (fileToBePut.equals("_SUCCESS")) {
-            copyDataToFolders(fs, folderPrefix, folderPaths, OSUtil.concat(OSUtil.NORMAL_INPUT, "_SUCCESS"));
-        } else {
-            copyDataToFolders(fs, folderPrefix, folderPaths, OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile4.txt"));
-        }
-    }
-
-    /**
-     * Uploads dataFile4.txt file to remote directories with names within date ranges.
-     * @param fs target filesystem
-     * @param interval date ranges before and after the current date
-     * @param minuteSkip time to skip within a range to get intermediate directories
-     * @param folderPrefix prefix for remote directories
-     * @param postFix postfix for remote locations
-     * @throws IOException
-     */
-    public static void lateDataReplenishWithoutSuccess(FileSystem fs, int interval,
-        int minuteSkip, String folderPrefix, String postFix) throws IOException {
-        List<String> folderPaths = TimeUtil.getMinuteDatesOnEitherSide(interval, minuteSkip);
-        LOGGER.info("folderData: " + folderPaths.toString());
-        if (postFix != null) {
-            for (int i = 0; i < folderPaths.size(); i++) {
-                folderPaths.set(i, folderPaths.get(i) + postFix);
-            }
-        }
-        createFolders(fs, folderPrefix, folderPaths);
-        copyDataToFolders(fs, folderPrefix, folderPaths, OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile4.txt"));
-    }
-
-    /**
-     * Uploads both dataFile4.txt and _SUCCESS files to remote directories with names within date
-     * ranges.
-     * @param fs target filesystem
-     * @param interval date ranges before and after the current date
-     * @param minuteSkip time to skip within a range to get intermediate directories
-     * @param folderPrefix prefix for remote directories
-     * @param postFix postfix for remote locations
-     * @throws IOException
-     */
-    public static void lateDataReplenish(FileSystem fs, int interval, int minuteSkip,
-                                         String folderPrefix, String postFix) throws IOException {
-        List<String> folderPaths = TimeUtil.getMinuteDatesOnEitherSide(interval, minuteSkip);
-        LOGGER.info("folderData: " + folderPaths.toString());
-        if (postFix != null) {
-            for (int i = 0; i < folderPaths.size(); i++) {
-                folderPaths.set(i, folderPaths.get(i) + postFix);
-            }
-        }
-        createFolders(fs, folderPrefix, folderPaths);
-        copyDataToFolders(fs, folderPrefix, folderPaths,
-            OSUtil.concat(OSUtil.NORMAL_INPUT, "_SUCCESS"),
-            OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile4.txt"));
-    }
-
-    /**
-     * Creates empty folders in hdfs.
-     * @param helper colo helper of the target cluster
-     * @param folderList list of folders
-     * @throws IOException
-     * @deprecated method creates filesystem object by itself. We should pass existing FileSystem
-     * object to such methods.
-     */
-    @Deprecated
-    public static void createHDFSFolders(ColoHelper helper, List<String> folderList)
-        throws IOException {
-        LOGGER.info("creating folders.....");
-        Configuration conf = new Configuration();
-        conf.set("fs.default.name", "hdfs://" + helper.getFeedHelper().getHadoopURL());
-        final FileSystem fs = FileSystem.get(conf);
-        for (final String folder : folderList) {
-            if (StringUtils.isNotEmpty(folder)) {
-                fs.mkdirs(new Path(cutProtocol(folder)));
-            }
-        }
-        LOGGER.info("created folders.....");
-    }
-}
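
For illustration, a minimal sketch of the typical HadoopUtil flow used by the regression tests:
recreate a base directory, flatten a local file into a few dated sub-directories, then list what
landed on HDFS. The fs.defaultFS value and all paths below are placeholders.

    import java.util.Arrays;

    import org.apache.falcon.regression.core.util.HadoopUtil;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HadoopUtilUsageSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://namenode:8020");   // placeholder namenode
            FileSystem fs = FileSystem.get(conf);

            String baseDir = "/tmp/falcon-regression/input";    // placeholder base directory
            HadoopUtil.recreateDir(fs, baseDir);

            // Copy one local sample file into two dated sub-directories under baseDir.
            HadoopUtil.flattenAndPutDataInFolder(fs, "src/test/resources/dataFile.txt",
                baseDir, Arrays.asList("/2016/03/01/00", "/2016/03/01/05"));

            // List everything that landed on HDFS (skips _SUCCESS markers).
            for (Path p : HadoopUtil.getAllFilesRecursivelyHDFS(fs, new Path(baseDir))) {
                System.out.println(p);
            }
        }
    }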

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveAssert.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveAssert.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveAssert.java
deleted file mode 100644
index 2a934b5..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveAssert.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.HCatDatabase;
-import org.apache.hive.hcatalog.api.HCatPartition;
-import org.apache.hive.hcatalog.api.HCatTable;
-import org.apache.hive.hcatalog.common.HCatException;
-import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
-import org.apache.log4j.Logger;
-import org.testng.asserts.SoftAssert;
-
-import java.io.IOException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-/** Assertions for Hive objects. */
-public final class HiveAssert {
-    private HiveAssert() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    private static final Logger LOGGER = Logger.getLogger(HiveAssert.class);
-
-    /**
-     * Assertion for column equality - it also covers checks that are not covered by
-     * HCatFieldSchema.equals().
-     * @param columns1 first column for comparison
-     * @param columns2 second column for comparison
-     * @param softAssert object to use for performing assertion
-     * @return object used for performing assertion
-     */
-    public static SoftAssert assertColumnListEqual(List<HCatFieldSchema> columns1,
-                                                   List<HCatFieldSchema> columns2,
-                                                   SoftAssert softAssert) {
-        softAssert.assertEquals(columns1, columns2, "List of columns for two tables are not same");
-        for (int i = 0; i < columns1.size(); ++i) {
-            HCatFieldSchema column1 = columns1.get(i);
-            HCatFieldSchema column2 = columns2.get(i);
-            softAssert.assertEquals(column2.getComment(), column1.getComment(),
-                "Comments of the columns: " + column1 + " & " + column2 + " are not the same");
-        }
-        return softAssert;
-    }
-
-    /**
-     * Assertion for equality of partitions - equality using HCatPartition.equals() is not
-     * satisfactory for our purpose.
-     * @param table1Partitions first list of partitions for comparison
-     * @param table2Partitions second list of partitions for comparison
-     * @param softAssert object to use for performing assertion
-     * @return object used for performing assertion
-     */
-    public static SoftAssert assertPartitionListEqual(List<HCatPartition> table1Partitions,
-        List<HCatPartition> table2Partitions, SoftAssert softAssert) {
-        softAssert.assertEquals(table1Partitions.size(), table2Partitions.size(),
-            "Number of partitions is not the same");
-        try {
-            for (int i = 0; i < table1Partitions.size(); i++) {
-                final HCatPartition table1Partition = table1Partitions.get(i);
-                final HCatPartition table2Partition = table2Partitions.get(i);
-                softAssert.assertEquals(table2Partition.getValues(), table1Partition.getValues(),
-                    "Partitions don't have same values");
-            }
-        } catch (Exception e) {
-            softAssert.fail("Couldn't do partition equality.", e);
-        }
-        return softAssert;
-    }
-
-    /**
-     * Assertion for equality of two tables (including table properties and table type).
-     * @param cluster1 the ColoHelper of first cluster
-     * @param table1 the first table
-     * @param cluster2 the ColoHelper of second cluster
-     * @param table2 the second table
-     * @param softAssert object used for performing assertion
-     * @return object used for performing assertion
-     * @throws java.io.IOException
-     */
-    public static SoftAssert assertTableEqual(ColoHelper cluster1, HCatTable table1,
-                                              ColoHelper cluster2, HCatTable table2,
-                                              SoftAssert softAssert) throws IOException {
-        return assertTableEqual(cluster1, table1, cluster2, table2, softAssert, true);
-    }
-
-    /**
-     * Assertion for equality of two tables.
-     * @param cluster1 the ColoHelper of first cluster
-     * @param table1 the first table (expected values)
-     * @param cluster2 the ColoHelper of second cluster
-     * @param table2 the second table (actual values)
-     * @param softAssert object used for performing assertion
-     * @param notIgnoreTblTypeAndProps whether table type and table properties should also be compared
-     * @return object used for performing assertion
-     * @throws java.io.IOException
-     */
-    public static SoftAssert assertTableEqual(ColoHelper cluster1, HCatTable table1,
-                                              ColoHelper cluster2, HCatTable table2,
-                                              SoftAssert softAssert,
-                                              boolean notIgnoreTblTypeAndProps) throws IOException {
-        FileSystem cluster1FS = cluster1.getClusterHelper().getHadoopFS();
-        FileSystem cluster2FS = cluster2.getClusterHelper().getHadoopFS();
-        final String table1FullName = table1.getDbName() + "." + table1.getTableName();
-        final String table2FullName = table2.getDbName() + "." + table2.getTableName();
-        LOGGER.info("Checking equality of table : " + table1FullName + " & " + table2FullName);
-        //table metadata equality
-        softAssert.assertEquals(table2.comment(), table1.comment(),
-            "Table " + table1FullName + " has different comment from " + table2FullName);
-        softAssert.assertEquals(table2.getBucketCols(), table1.getBucketCols(),
-            "Table " + table1FullName + " has different bucket columns from " + table2FullName);
-        assertColumnListEqual(table1.getCols(), table2.getCols(), softAssert);
-        softAssert.assertEquals(table2.getNumBuckets(), table1.getNumBuckets(),
-            "Table " + table1FullName + " has different number of buckets from " + table2FullName);
-        assertColumnListEqual(table1.getPartCols(), table2.getPartCols(), softAssert);
-        softAssert.assertEquals(table2.getSerdeParams(), table1.getSerdeParams(),
-            "Table " + table1FullName + " has different serde params from " + table2FullName);
-        softAssert.assertEquals(table2.getSortCols(), table1.getSortCols(),
-            "Table " + table1FullName + " has different sort columns from " + table2FullName);
-        softAssert.assertEquals(table2.getStorageHandler(), table1.getStorageHandler(),
-            "Table " + table1FullName + " has different storage handler from " + table2FullName);
-        if (notIgnoreTblTypeAndProps) {
-            softAssert.assertEquals(table2.getTabletype(), table1.getTabletype(),
-                "Table " + table1FullName + " has different Tabletype from " + table2FullName);
-        }
-        final Map<String, String> tbl1Props = table1.getTblProps();
-        final Map<String, String> tbl2Props = table2.getTblProps();
-        final String[] ignoreTblProps = {"transient_lastDdlTime", "repl.last.id",
-            "last_modified_by", "last_modified_time", "COLUMN_STATS_ACCURATE", };
-        for (String ignoreTblProp : ignoreTblProps) {
-            tbl1Props.remove(ignoreTblProp);
-            tbl2Props.remove(ignoreTblProp);
-        }
-        final String[] ignoreDefaultProps = {"numRows", "rawDataSize"};
-        for (String ignoreProp : ignoreDefaultProps) {
-            if ("-1".equals(tbl1Props.get(ignoreProp))) {
-                tbl1Props.remove(ignoreProp);
-            }
-            if ("-1".equals(tbl2Props.get(ignoreProp))) {
-                tbl2Props.remove(ignoreProp);
-            }
-        }
-
-        if (notIgnoreTblTypeAndProps) {
-            softAssert.assertEquals(tbl2Props, tbl1Props,
-                "Table " + table1FullName + " has different TblProps from " + table2FullName);
-        }
-        LOGGER.info("Checking equality of table partitions");
-        HCatClient hcatClient1 = cluster1.getClusterHelper().getHCatClient();
-        HCatClient hcatClient2 = cluster2.getClusterHelper().getHCatClient();
-        final List<HCatPartition> table1Partitions =
-            hcatClient1.getPartitions(table1.getDbName(), table1.getTableName());
-        final List<HCatPartition> table2Partitions =
-            hcatClient2.getPartitions(table2.getDbName(), table2.getTableName());
-        assertPartitionListEqual(table1Partitions, table2Partitions, softAssert);
-        if (notIgnoreTblTypeAndProps) {
-            softAssert.assertEquals(
-                cluster2FS.getContentSummary(new Path(table2.getLocation())).getLength(),
-                cluster1FS.getContentSummary(new Path(table1.getLocation())).getLength(),
-                "Size of content for table1 and table2 are different");
-        }
-
-        //table content equality
-        LOGGER.info("Checking equality of table contents");
-        Statement jdbcStmt1 = null, jdbcStmt2 = null;
-        try {
-            final boolean execute1;
-            final boolean execute2;
-            jdbcStmt1 = cluster1.getClusterHelper().getHiveJdbcConnection().createStatement();
-            jdbcStmt2 = cluster2.getClusterHelper().getHiveJdbcConnection().createStatement();
-            execute1 = jdbcStmt1.execute("select * from " + table1FullName);
-            execute2 = jdbcStmt2.execute("select * from " + table2FullName);
-            softAssert.assertEquals(execute2, execute1,
-                "Table " + table1FullName + " has different result of select * from " + table2FullName);
-            if (execute1 && execute2) {
-                final ResultSet resultSet1 = jdbcStmt1.getResultSet();
-                final ResultSet resultSet2 = jdbcStmt2.getResultSet();
-                final List<String> rows1 = HiveUtil.fetchRows(resultSet1);
-                final List<String> rows2 = HiveUtil.fetchRows(resultSet2);
-                softAssert.assertEquals(rows2, rows1,
-                    "Table " + table1FullName + " has different content from " + table2FullName);
-            }
-        } catch (SQLException e) {
-            softAssert.fail("Comparison of content of table " + table1FullName
-                + " with content of table " + table2FullName + " failed because of exception\n"
-                + ExceptionUtils.getFullStackTrace(e));
-        } finally {
-            if (jdbcStmt1 != null) {
-                try {
-                    jdbcStmt1.close();
-                } catch (SQLException e) {
-                    LOGGER.warn("Closing of jdbcStmt1 failed: " + ExceptionUtils.getFullStackTrace(e));
-                }
-            }
-            if (jdbcStmt2 != null) {
-                try {
-                    jdbcStmt2.close();
-                } catch (SQLException e) {
-                    LOGGER.warn("Closing of jdbcStmt2 failed: " + ExceptionUtils.getFullStackTrace(e));
-                }
-            }
-        }
-        return softAssert;
-    }
-
-    /**
-     * Assertion for equality of two dbs.
-     * @param cluster1 the ColoHelper of first cluster
-     * @param db1 first database for comparison (expected values)
-     * @param cluster2 the ColoHelper of second cluster
-     * @param db2 second database for comparison (actual values)
-     * @param softAssert object used for performing assertion
-     * @return object used for performing assertion
-     * @throws java.io.IOException
-     */
-    public static SoftAssert assertDbEqual(ColoHelper cluster1, HCatDatabase db1,
-                                           ColoHelper cluster2, HCatDatabase db2,
-                                           SoftAssert softAssert) throws IOException {
-        HCatClient hcatClient1 = cluster1.getClusterHelper().getHCatClient();
-        HCatClient hcatClient2 = cluster2.getClusterHelper().getHCatClient();
-        //check database name equality
-        final String db1Name = db1.getName();
-        final String db2Name = db2.getName();
-        softAssert.assertEquals(db2.getComment(), db1.getComment(), "Comment differ for the dbs");
-        //check database properties equality
-        softAssert.assertEquals(db2.getProperties(), db1.getProperties(),
-            "Database " + db1Name + " has different properties from " + db2Name);
-        //checking table equality
-        final List<String> db1tableNames = hcatClient1.listTableNamesByPattern(db1Name, ".*");
-        final List<String> db2tableNames = hcatClient2.listTableNamesByPattern(db2Name, ".*");
-        Collections.sort(db1tableNames);
-        Collections.sort(db2tableNames);
-        softAssert.assertEquals(db2tableNames, db1tableNames,
-            "Table names are not the same. Actual: " + db2tableNames + " Expected: " + db1tableNames);
-        for (String tableName : db1tableNames) {
-            try {
-                assertTableEqual(cluster1, hcatClient1.getTable(db1Name, tableName),
-                    cluster2, hcatClient2.getTable(db2Name, tableName), softAssert);
-            } catch (HCatException e) {
-                softAssert.fail("Table equality check threw exception.", e);
-            }
-        }
-        return softAssert;
-    }
-}
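
For illustration, a minimal sketch of how HiveAssert is typically used to compare a replicated
database across two clusters. The database name "testdb" is a placeholder, and the two ColoHelper
instances are assumed to come from the test's configured server list.

    import org.apache.falcon.regression.core.helpers.ColoHelper;
    import org.apache.falcon.regression.core.util.HiveAssert;
    import org.apache.hive.hcatalog.api.HCatClient;
    import org.testng.asserts.SoftAssert;

    public final class HiveAssertUsageSketch {
        private HiveAssertUsageSketch() {
        }

        public static void compareDbAcrossClusters(ColoHelper cluster1, ColoHelper cluster2)
            throws Exception {
            HCatClient hcat1 = cluster1.getClusterHelper().getHCatClient();
            HCatClient hcat2 = cluster2.getClusterHelper().getHCatClient();
            SoftAssert softAssert = new SoftAssert();
            HiveAssert.assertDbEqual(cluster1, hcat1.getDatabase("testdb"),
                cluster2, hcat2.getDatabase("testdb"), softAssert);
            softAssert.assertAll();   // report all accumulated mismatches at once
        }
    }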

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveUtil.java
deleted file mode 100644
index 293a210..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/HiveUtil.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.log4j.Logger;
-
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Utility class for Hive-related helpers. All the methods in this class assume that they are
- * dealing with small datasets.
- */
-public final class HiveUtil {
-
-    private HiveUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final String DRIVER_NAME = "org.apache.hive.jdbc.HiveDriver";
-
-    private static final Logger LOGGER = Logger.getLogger(HiveUtil.class);
-
-    public static Connection getHiveJdbcConnection(final String jdbcUrl, final String user, final String password,
-                                                   final String hivePrincipal)
-        throws ClassNotFoundException, SQLException, IOException, InterruptedException {
-        final String transportMode = new HiveConf().get("hive.server2.transport.mode", "binary");
-        String connectionStringSuffix = "";
-        if (transportMode.equalsIgnoreCase("http")) {
-            connectionStringSuffix += "transportMode=http;httpPath=cliservice;";
-        }
-        if (MerlinConstants.IS_SECURE) {
-            connectionStringSuffix += String.format("principal=%s;kerberosAuthType=fromSubject;", hivePrincipal);
-        }
-        final String connectionStringSuffix2 = connectionStringSuffix;
-        final UserGroupInformation ugi = KerberosHelper.getUGI(user);
-        final Connection conn = ugi.doAs(new PrivilegedExceptionAction<Connection>() {
-            @Override
-            public Connection run() throws Exception {
-                Class.forName(DRIVER_NAME);
-                return DriverManager.getConnection(jdbcUrl + "/;" + connectionStringSuffix2, ugi.getShortUserName(),
-                    password);
-            }
-        });
-
-        return conn;
-    }
-
-    /**
-     * Fetches rows from a given ResultSet and converts them to a list of strings, each string being
-     * comma-separated column values. The output also has a header with column names and a footer with
-     * the number of rows returned.
-     * @param rs result set
-     * @return list of strings - each string corresponds to the output that you would get at the
-     * SQL prompt
-     * @throws SQLException
-     */
-    public static List<String> fetchRows(ResultSet rs) throws SQLException {
-        ResultSetMetaData metaData = rs.getMetaData();
-        List<String> output = new ArrayList<String>();
-
-        int numberOfColumns = metaData.getColumnCount();
-        StringBuilder sbCol = new StringBuilder();
-        for (int i = 1; i <= numberOfColumns; i++) {
-            if (i > 1) {
-                sbCol.append(",");
-            }
-            String columnName = metaData.getColumnName(i);
-            // the column name looks like tab1.col1
-            // we want to remove table name else table equality will fail
-            if (columnName.contains(".")) {
-                columnName = columnName.split("\\.")[1];
-            }
-            sbCol.append("'").append(columnName).append("'");
-        }
-        LOGGER.info(sbCol.toString());
-        output.add(sbCol.toString());
-
-        int numberOfRows = 0;
-        while (rs.next()) {
-            StringBuilder sbVal = new StringBuilder();
-            numberOfRows++;
-            for (int i = 1; i <= numberOfColumns; i++) {
-                if (i > 1) {
-                    sbVal.append(",");
-                }
-                String columnValue = rs.getString(i);
-                sbVal.append("'").append(columnValue != null ? columnValue : "").append("'");
-            }
-            LOGGER.info(sbVal.toString());
-            output.add(sbVal.toString());
-        }
-        Collections.sort(output); //sorting to ensure stable results across different runs
-        String rowStr = (numberOfRows > 0 ? numberOfRows : "No")
-            + (numberOfRows == 1 ? " row" : " rows") + " selected";
-        LOGGER.info(rowStr);
-        output.add(rowStr);
-        return output;
-    }
-
-    /**
-     * Runs a SQL statement using the given connection.
-     * @param connection the connection to be used for running the SQL
-     * @param sql the SQL to be run
-     * @throws SQLException
-     * @return output of the query as a List of strings
-     */
-    public static List<String> runSql(Connection connection, String sql) throws SQLException {
-        Statement stmt = null;
-        try {
-            stmt = connection.createStatement();
-            LOGGER.info("Executing: " + sql);
-            stmt.execute(sql);
-            final ResultSet resultSet = stmt.getResultSet();
-            if (resultSet != null) {
-                final List<String> output = fetchRows(resultSet);
-                LOGGER.info("Results are:\n" + StringUtils.join(output, "\n"));
-                return output;
-            }
-            LOGGER.info("Query executed.");
-        } finally {
-            if (stmt != null) {
-                stmt.close();
-            }
-        }
-        return new ArrayList<>();
-    }
-}
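
For illustration, a minimal sketch of HiveUtil usage: obtain a HiveServer2 JDBC connection and run
a couple of statements. The JDBC URL, user, password and Kerberos principal below are placeholders.

    import java.sql.Connection;
    import java.util.List;

    import org.apache.commons.lang.StringUtils;
    import org.apache.falcon.regression.core.util.HiveUtil;

    public class HiveUtilUsageSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder connection details; the suite reads these from cluster properties.
            Connection connection = HiveUtil.getHiveJdbcConnection(
                "jdbc:hive2://hiveserver2-host:10000", "falcon-qa", "", "hive/_HOST@EXAMPLE.COM");
            try {
                HiveUtil.runSql(connection, "create database if not exists testdb");
                List<String> rows = HiveUtil.runSql(connection, "show databases");
                // Output contains the column names, the row values and a row-count footer.
                System.out.println(StringUtils.join(rows, "\n"));
            } finally {
                connection.close();
            }
        }
    }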


[33/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/AbstractTestBase.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/AbstractTestBase.java b/common/src/test/java/org/apache/falcon/entity/AbstractTestBase.java
deleted file mode 100644
index fd963e5..0000000
--- a/common/src/test/java/org/apache/falcon/entity/AbstractTestBase.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.testng.annotations.BeforeClass;
-
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-import java.io.File;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.net.URI;
-import java.util.Collection;
-
-/**
- * Base class for config store tests.
- */
-public class AbstractTestBase {
-    protected static final String USER = System.getProperty("user.name");
-
-    protected static final String PROCESS_XML = "/config/process/process-0.1.xml";
-    protected static final String FEED_XML = "/config/feed/feed-0.1.xml";
-    protected static final String FEED3_XML = "/config/feed/feed-0.3.xml";
-    protected static final String FEED4_XML = "/config/feed/feed-0.4.xml";
-    protected static final String CLUSTER_XML = "/config/cluster/cluster-0.1.xml";
-    protected static final String DATASOURCE_XML = "/config/datasource/datasource-0.1.xml";
-    protected EmbeddedCluster dfsCluster;
-    protected Configuration conf = new Configuration();
-    private ConfigurationStore store;
-
-    public ConfigurationStore getStore() {
-        return store;
-    }
-
-    @BeforeClass
-    public void initConfigStore() throws Exception {
-        String configPath = new URI(StartupProperties.get().getProperty("config.store.uri")).getPath();
-        String location = configPath + "-" + getClass().getName();
-        StartupProperties.get().setProperty("config.store.uri", location);
-        FileUtils.deleteDirectory(new File(location));
-
-        cleanupStore();
-        String listeners = StartupProperties.get().getProperty("configstore.listeners");
-        listeners = listeners.replace("org.apache.falcon.service.SharedLibraryHostingService", "");
-        listeners = listeners.replace("org.apache.falcon.service.FeedSLAMonitoringService", "");
-        StartupProperties.get().setProperty("configstore.listeners", listeners);
-        store = ConfigurationStore.get();
-        store.init();
-
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_2);
-        UserGroupInformation.createUserForTesting(FalconTestUtil.TEST_USER_2, new String[]{"testgroup"});
-    }
-
-    protected void cleanupStore() throws FalconException {
-        store = ConfigurationStore.get();
-        for (EntityType type : EntityType.values()) {
-            Collection<String> entities = store.getEntities(type);
-            for (String entity : entities) {
-                store.remove(type, entity);
-            }
-        }
-    }
-
-    protected void storeEntity(EntityType type, String name) throws Exception {
-        final String proxyUser = CurrentUser.getUser();
-        final String defaultGroupName = CurrentUser.getPrimaryGroupName();
-
-        Unmarshaller unmarshaller = type.getUnmarshaller();
-        store = ConfigurationStore.get();
-        store.remove(type, name);
-        switch (type) {
-        case CLUSTER:
-            Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass().getResource(CLUSTER_XML));
-            cluster.setName(name);
-            ClusterHelper.getInterface(cluster, Interfacetype.WRITE)
-                    .setEndpoint(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
-            decorateACL(proxyUser, defaultGroupName, cluster);
-
-            store.publish(type, cluster);
-            break;
-
-        case FEED:
-            Feed feed = (Feed) unmarshaller.unmarshal(this.getClass().getResource(FEED_XML));
-            feed.setName(name);
-            decorateACL(proxyUser, defaultGroupName, feed);
-
-            store.publish(type, feed);
-            break;
-
-        case PROCESS:
-            Process process = (Process) unmarshaller.unmarshal(this.getClass().getResource(PROCESS_XML));
-            process.setName(name);
-            FileSystem fs = dfsCluster.getFileSystem();
-            fs.mkdirs(new Path(process.getWorkflow().getPath()));
-            if (!fs.exists(new Path(process.getWorkflow().getPath() + "/lib"))) {
-                fs.mkdirs(new Path(process.getWorkflow().getPath() + "/lib"));
-            }
-
-            decorateACL(proxyUser, defaultGroupName, process);
-
-            store.publish(type, process);
-            break;
-        default:
-        }
-    }
-
-    protected void deleteEntity(EntityType type, String name) throws FalconException {
-        store.remove(type, name);
-    }
-
-
-
-    private void decorateACL(String proxyUser, String defaultGroupName, Cluster cluster) {
-        if (cluster.getACL() != null) {
-            return;
-        }
-
-        org.apache.falcon.entity.v0.cluster.ACL clusterACL =
-                new org.apache.falcon.entity.v0.cluster.ACL();
-        clusterACL.setOwner(proxyUser);
-        clusterACL.setGroup(defaultGroupName);
-        cluster.setACL(clusterACL);
-    }
-
-    private void decorateACL(String proxyUser, String defaultGroupName, Feed feed) {
-        if (feed.getACL() != null) {
-            return;
-        }
-
-        org.apache.falcon.entity.v0.feed.ACL feedACL =
-                new org.apache.falcon.entity.v0.feed.ACL();
-        feedACL.setOwner(proxyUser);
-        feedACL.setGroup(defaultGroupName);
-        feed.setACL(feedACL);
-    }
-
-    private void decorateACL(String proxyUser, String defaultGroupName,
-                             Process process) {
-        if (process.getACL() != null) {
-            return;
-        }
-
-        org.apache.falcon.entity.v0.process.ACL processACL =
-                new org.apache.falcon.entity.v0.process.ACL();
-        processACL.setOwner(proxyUser);
-        processACL.setGroup(defaultGroupName);
-        process.setACL(processACL);
-    }
-
-    public void setup() throws Exception {
-        store = ConfigurationStore.get();
-        for (EntityType type : EntityType.values()) {
-            for (String name : store.getEntities(type)) {
-                store.remove(type, name);
-            }
-        }
-        storeEntity(EntityType.CLUSTER, "corp");
-        storeEntity(EntityType.FEED, "clicks");
-        storeEntity(EntityType.FEED, "impressions");
-        storeEntity(EntityType.FEED, "clicksummary");
-        storeEntity(EntityType.PROCESS, "clicksummary");
-    }
-
-    public String marshallEntity(final Entity entity) throws FalconException,
-                                                             JAXBException {
-        Marshaller marshaller = entity.getEntityType().getMarshaller();
-        StringWriter stringWriter = new StringWriter();
-        marshaller.marshal(entity, stringWriter);
-        return stringWriter.toString();
-    }
-
-    // Assumes there will always be at least one group for a logged-in user.
-    protected String getPrimaryGroupName() throws IOException {
-        return CurrentUser.getPrimaryGroupName();
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/CatalogStorageTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/CatalogStorageTest.java b/common/src/test/java/org/apache/falcon/entity/CatalogStorageTest.java
deleted file mode 100644
index 5d06431..0000000
--- a/common/src/test/java/org/apache/falcon/entity/CatalogStorageTest.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.testng.Assert;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.net.URISyntaxException;
-
-/**
- * Test class for Catalog Table Storage.
- * The exists() check will be covered in integration tests, as it actually verifies whether the table exists.
- */
-public class CatalogStorageTest {
-
-    @Test
-    public void testGetType() throws Exception {
-        String table = "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us";
-        CatalogStorage storage = new CatalogStorage(CatalogStorage.CATALOG_URL, table);
-        Assert.assertEquals(Storage.TYPE.TABLE, storage.getType());
-    }
-
-    @Test
-    public void testParseFeedUriValid() throws URISyntaxException {
-        String table = "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us";
-        CatalogStorage storage = new CatalogStorage(CatalogStorage.CATALOG_URL, table);
-        Assert.assertEquals("${hcatNode}", storage.getCatalogUrl());
-        Assert.assertEquals("clicksdb", storage.getDatabase());
-        Assert.assertEquals("clicks", storage.getTable());
-        Assert.assertEquals(Storage.TYPE.TABLE, storage.getType());
-        Assert.assertEquals(2, storage.getPartitions().size());
-        Assert.assertEquals("us", storage.getPartitionValue("region"));
-        Assert.assertTrue(storage.hasPartition("region"));
-        Assert.assertNull(storage.getPartitionValue("unknown"));
-        Assert.assertFalse(storage.hasPartition("unknown"));
-        Assert.assertEquals(storage.getDatedPartitionKeys().get(0), "ds");
-    }
-
-    @Test
-    public void testParseFeedUriValid2() throws URISyntaxException {
-        String table = "catalog:clicksdb:clicks#ds=${YEAR}${MONTH}${DAY};region=us";
-        CatalogStorage storage = new CatalogStorage(CatalogStorage.CATALOG_URL, table);
-        Assert.assertEquals("${hcatNode}", storage.getCatalogUrl());
-        Assert.assertEquals("clicksdb", storage.getDatabase());
-        Assert.assertEquals("clicks", storage.getTable());
-        Assert.assertEquals(Storage.TYPE.TABLE, storage.getType());
-        Assert.assertEquals(2, storage.getPartitions().size());
-        Assert.assertEquals("us", storage.getPartitionValue("region"));
-        Assert.assertTrue(storage.hasPartition("region"));
-        Assert.assertNull(storage.getPartitionValue("unknown"));
-        Assert.assertFalse(storage.hasPartition("unknown"));
-        Assert.assertEquals(storage.getDatedPartitionKeys().get(0), "ds");
-    }
-
-    @Test
-    public void testCreateFromUriTemplate() throws Exception {
-        String uriTemplate = "thrift://localhost:49083/clicksdb/clicks/region=us;ds=${YEAR}-${MONTH}-${DAY}";
-        CatalogStorage storage = new CatalogStorage(uriTemplate);
-        Assert.assertEquals("thrift://localhost:49083", storage.getCatalogUrl());
-        Assert.assertEquals("clicksdb", storage.getDatabase());
-        Assert.assertEquals("clicks", storage.getTable());
-        Assert.assertEquals(Storage.TYPE.TABLE, storage.getType());
-        Assert.assertEquals(2, storage.getPartitions().size());
-        Assert.assertEquals("us", storage.getPartitionValue("region"));
-        Assert.assertTrue(storage.hasPartition("region"));
-        Assert.assertNull(storage.getPartitionValue("unknown"));
-        Assert.assertFalse(storage.hasPartition("unknown"));
-    }
-
-    @DataProvider(name = "invalidFeedURITemplates")
-    public Object[][] createInValidFeedUriTemplates() {
-        return new Object[][] {
-            {"thrift://localhost:49083/clicksdb/clicks/region=us;ds=${YEAR}/${MONTH}/${DAY}"},
-            {"thrift://localhost:49083/clicksdb/clicks/region=us;ds=${YEAR}/${MONTH}-${DAY}"},
-        };
-    }
-
-    @Test(dataProvider = "invalidFeedURITemplates", expectedExceptions = URISyntaxException.class)
-    public void testParseInvalidFeedUriTemplate(String uriTemplate) throws URISyntaxException {
-        new CatalogStorage(uriTemplate);
-        Assert.fail("Exception must have been thrown");
-    }
-
-    @DataProvider(name = "invalidFeedURIs")
-    public Object[][] createFeedUriInvalid() {
-        return new Object[][] {
-            {"catalog:default:clicks:ds=${YEAR}-${MONTH}-${DAY}#region=us"},
-            {"default:clicks:ds=${YEAR}-${MONTH}-${DAY}#region=us"},
-            {"catalog:default#ds=${YEAR}-${MONTH}-${DAY};region=us"},
-            {"catalog://default/clicks#ds=${YEAR}-${MONTH}-${DAY}:region=us"},
-        };
-    }
-
-    @Test(dataProvider = "invalidFeedURIs", expectedExceptions = URISyntaxException.class)
-    public void testParseFeedUriInvalid(String tableUri) throws URISyntaxException {
-        new CatalogStorage(CatalogStorage.CATALOG_URL, tableUri);
-        Assert.fail("Exception must have been thrown");
-    }
-
-    @Test
-    public void testIsIdenticalPositive() throws Exception {
-        CatalogStorage table1 = new CatalogStorage(CatalogStorage.CATALOG_URL,
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        CatalogStorage table2 = new CatalogStorage(CatalogStorage.CATALOG_URL,
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        Assert.assertTrue(table1.isIdentical(table2));
-
-        final String catalogUrl = "thrift://localhost:49083";
-        CatalogStorage table3 = new CatalogStorage(catalogUrl,
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        CatalogStorage table4 = new CatalogStorage(catalogUrl,
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        Assert.assertTrue(table3.isIdentical(table4));
-    }
-
-    @Test
-    public void testIsIdenticalNegative() throws Exception {
-        CatalogStorage table1 = new CatalogStorage(CatalogStorage.CATALOG_URL,
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        CatalogStorage table2 = new CatalogStorage(CatalogStorage.CATALOG_URL,
-                "catalog:clicksdb:impressions#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        Assert.assertFalse(table1.isIdentical(table2));
-
-        final String catalogUrl = "thrift://localhost:49083";
-        CatalogStorage table3 = new CatalogStorage(catalogUrl,
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        CatalogStorage table4 = new CatalogStorage(catalogUrl,
-                "catalog:clicksdb:impressions#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        Assert.assertFalse(table3.isIdentical(table4));
-
-        CatalogStorage table5 = new CatalogStorage("thrift://localhost:49084",
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        CatalogStorage table6 = new CatalogStorage("thrift://localhost:49083",
-                "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us");
-        Assert.assertFalse(table5.isIdentical(table6));
-    }
-
-    @Test
-    public void testGetUriTemplateWithCatalogUrl() throws Exception {
-        final String catalogUrl = "thrift://localhost:49083";
-        String tableUri = "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us";
-        String uriTemplate = "thrift://localhost:49083/clicksdb/clicks/ds=${YEAR}-${MONTH}-${DAY};region=us";
-
-        CatalogStorage table = new CatalogStorage(catalogUrl, tableUri);
-
-        Assert.assertEquals(uriTemplate, table.getUriTemplate());
-        Assert.assertEquals(uriTemplate, table.getUriTemplate(LocationType.DATA));
-        Assert.assertEquals(table.getUriTemplate(), table.getUriTemplate(LocationType.DATA));
-    }
-
-    @Test
-    public void testGetUriTemplateWithOutCatalogUrl() throws Exception {
-        String tableUri = "catalog:clicksdb:clicks#ds=${YEAR}-${MONTH}-${DAY};region=us";
-        String uriTemplate = "${hcatNode}/clicksdb/clicks/ds=${YEAR}-${MONTH}-${DAY};region=us";
-
-        CatalogStorage table = new CatalogStorage(CatalogStorage.CATALOG_URL, tableUri);
-
-        Assert.assertEquals(uriTemplate, table.getUriTemplate());
-        Assert.assertEquals(uriTemplate, table.getUriTemplate(LocationType.DATA));
-        Assert.assertEquals(table.getUriTemplate(), table.getUriTemplate(LocationType.DATA));
-    }
-
-    @Test
-    public void testToPartitionFilter() throws Exception {
-        final String catalogUrl = "thrift://localhost:49083";
-        String tableUri = "catalog:clicksdb:clicks#ds=20130918;region=us";
-        String partitionFilter = "(ds='20130918';region='us')";
-
-        CatalogStorage table = new CatalogStorage(catalogUrl, tableUri);
-        Assert.assertEquals(table.toPartitionFilter(), partitionFilter);
-    }
-
-    @Test
-    public void testToPartitionAsPath() throws Exception {
-        final String catalogUrl = "thrift://localhost:49083";
-        String tableUri = "catalog:clicksdb:clicks#ds=20130918;region=us";
-        String partitionPath = "ds=20130918/region=us";
-
-        CatalogStorage table = new CatalogStorage(catalogUrl, tableUri);
-        Assert.assertEquals(table.toPartitionAsPath(), partitionPath);
-    }
-
-    @Test
-    public void testCreateFromURL() throws Exception {
-        String url = "thrift://localhost:29083/falcon_db/output_table/ds=2012-04-21-00";
-        CatalogStorage storage = new CatalogStorage(url);
-        Assert.assertEquals("thrift://localhost:29083", storage.getCatalogUrl());
-        Assert.assertEquals("falcon_db", storage.getDatabase());
-        Assert.assertEquals("output_table", storage.getTable());
-        Assert.assertEquals(Storage.TYPE.TABLE, storage.getType());
-        Assert.assertEquals(1, storage.getPartitions().size());
-        Assert.assertEquals("2012-04-21-00", storage.getPartitionValue("ds"));
-        Assert.assertTrue(storage.hasPartition("ds"));
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/ColoClusterRelationTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/ColoClusterRelationTest.java b/common/src/test/java/org/apache/falcon/entity/ColoClusterRelationTest.java
deleted file mode 100644
index 0d6e754..0000000
--- a/common/src/test/java/org/apache/falcon/entity/ColoClusterRelationTest.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import java.util.Set;
-
-/**
- * Tests for validating the relationship between clusters and data centers/co-locations.
- */
-@Test
-public class ColoClusterRelationTest extends AbstractTestBase {
-
-    private Cluster newCluster(String name, String colo) {
-        Cluster cluster = new Cluster();
-        cluster.setName(name);
-        cluster.setColo(colo);
-        return cluster;
-    }
-
-    @Test
-    public void testMapping() throws Exception {
-        Cluster cluster1 = newCluster("cluster1", "colo1");
-        Cluster cluster2 = newCluster("cluster2", "colo1");
-        Cluster cluster3 = newCluster("cluster3", "colo2");
-        getStore().publish(EntityType.CLUSTER, cluster1);
-        getStore().publish(EntityType.CLUSTER, cluster2);
-        getStore().publish(EntityType.CLUSTER, cluster3);
-
-        ColoClusterRelation relation = ColoClusterRelation.get();
-        Set<String> clusters = relation.getClusters("colo1");
-        Assert.assertNotNull(clusters);
-        Assert.assertEquals(2, clusters.size());
-        Assert.assertTrue(clusters.contains(cluster1.getName()));
-        Assert.assertTrue(clusters.contains(cluster2.getName()));
-
-        clusters = relation.getClusters("colo2");
-        Assert.assertNotNull(clusters);
-        Assert.assertEquals(1, clusters.size());
-        Assert.assertTrue(clusters.contains(cluster3.getName()));
-
-        getStore().remove(EntityType.CLUSTER, cluster1.getName());
-        clusters = relation.getClusters("colo1");
-        Assert.assertNotNull(clusters);
-        Assert.assertEquals(1, clusters.size());
-        Assert.assertTrue(clusters.contains(cluster2.getName()));
-
-        getStore().remove(EntityType.CLUSTER, cluster2.getName());
-        clusters = relation.getClusters("colo1");
-        Assert.assertNotNull(clusters);
-        Assert.assertEquals(0, clusters.size());
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/EntityTypeTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/EntityTypeTest.java b/common/src/test/java/org/apache/falcon/entity/EntityTypeTest.java
deleted file mode 100644
index 5a4d6ec..0000000
--- a/common/src/test/java/org/apache/falcon/entity/EntityTypeTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-/**
- * Test for validating entity types.
- */
-public class EntityTypeTest {
-
-    @Test
-    public void testGetEntityClass() {
-        Assert.assertEquals(EntityType.PROCESS.getEntityClass().getName(),
-                "org.apache.falcon.entity.v0.process.Process");
-    }
-
-    @Test
-    public void testIsSchedulable() {
-        Assert.assertTrue(EntityType.PROCESS.isSchedulable());
-        Assert.assertTrue(EntityType.FEED.isSchedulable());
-        Assert.assertFalse(EntityType.CLUSTER.isSchedulable());
-        Assert.assertFalse(EntityType.DATASOURCE.isSchedulable());
-    }
-
-    @Test
-    public void testValidEntityTypes() {
-        Assert.assertEquals(EntityType.FEED, EntityType.getEnum("feed"));
-        Assert.assertEquals(EntityType.FEED, EntityType.getEnum("FeEd"));
-        Assert.assertEquals(EntityType.CLUSTER, EntityType.getEnum("cluster"));
-        Assert.assertEquals(EntityType.CLUSTER, EntityType.getEnum("cluSTER"));
-        Assert.assertEquals(EntityType.PROCESS, EntityType.getEnum("process"));
-        Assert.assertEquals(EntityType.PROCESS, EntityType.getEnum("pRocess"));
-        Assert.assertEquals(EntityType.DATASOURCE, EntityType.getEnum("datasource"));
-        Assert.assertEquals(EntityType.DATASOURCE, EntityType.getEnum("dataSource"));
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testInvalidEntityTypes() throws Exception {
-        EntityType.getEnum("invalid");
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java b/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java
deleted file mode 100644
index c87449c..0000000
--- a/common/src/test/java/org/apache/falcon/entity/EntityUtilTest.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.Pair;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.parser.ClusterEntityParser;
-import org.apache.falcon.entity.parser.EntityParserFactory;
-import org.apache.falcon.entity.parser.ProcessEntityParser;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.LateArrival;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Map;
-import java.util.Properties;
-import java.util.TimeZone;
-
-/**
- * Tests for validating EntityUtil helper methods.
- */
-public class EntityUtilTest extends AbstractTestBase {
-    private static TimeZone tz = TimeZone.getTimeZone("UTC");
-
-    @Test
-    public void testProcessView() throws Exception {
-        Process process = (Process) EntityType.PROCESS.getUnmarshaller().unmarshal(
-                getClass().getResourceAsStream(PROCESS_XML));
-        Cluster cluster = new Cluster();
-        cluster.setName("newCluster");
-        cluster.setValidity(process.getClusters().getClusters().get(0).getValidity());
-        process.getClusters().getClusters().add(cluster);
-        Assert.assertEquals(process.getClusters().getClusters().size(), 2);
-        String currentCluster = process.getClusters().getClusters().get(0).getName();
-        Process newProcess = EntityUtil.getClusterView(process, currentCluster);
-        Assert.assertFalse(EntityUtil.equals(process, newProcess));
-        Assert.assertEquals(newProcess.getClusters().getClusters().size(), 1);
-        Assert.assertEquals(newProcess.getClusters().getClusters().get(0).getName(), currentCluster);
-    }
-
-    @Test
-    public void testFeedView() throws Exception {
-        Feed feed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                getClass().getResourceAsStream(FEED_XML));
-        Feed view = EntityUtil.getClusterView(feed, "testCluster");
-        Assert.assertEquals(view.getClusters().getClusters().size(), 1);
-        Assert.assertEquals(view.getClusters().getClusters().get(0).getName(), "testCluster");
-
-        view = EntityUtil.getClusterView(feed, "backupCluster");
-        Assert.assertEquals(view.getClusters().getClusters().size(), 2);
-    }
-
-    @Test
-    public void testEquals() throws Exception {
-        Process process1 = (Process) EntityType.PROCESS.getUnmarshaller().unmarshal(
-                getClass().getResourceAsStream(PROCESS_XML));
-        Process process2 = (Process) EntityType.PROCESS.getUnmarshaller().unmarshal(
-                getClass().getResourceAsStream(PROCESS_XML));
-        Assert.assertTrue(EntityUtil.equals(process1, process2));
-        Assert.assertTrue(EntityUtil.md5(process1).equals(EntityUtil.md5(process2)));
-
-        process2.getClusters().getClusters().get(0).getValidity().setEnd(
-                SchemaHelper.parseDateUTC("2013-04-21T00:00Z"));
-        Assert.assertFalse(EntityUtil.equals(process1, process2));
-        Assert.assertFalse(EntityUtil.md5(process1).equals(EntityUtil.md5(process2)));
-        Assert.assertTrue(EntityUtil.equals(process1, process2, new String[]{"clusters.clusters[\\d+].validity.end"}));
-    }
-
-    private static Date getDate(String date) throws Exception {
-        DateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm Z");
-        return format.parse(date);
-    }
-
-    @Test
-    public void testGetNextStartTime() throws Exception {
-        Date now = getDate("2012-04-03 02:45 UTC");
-        Date start = getDate("2012-04-02 03:00 UTC");
-        Date newStart = getDate("2012-04-03 03:00 UTC");
-
-        Frequency frequency = new Frequency("hours(1)");
-        Assert.assertEquals(newStart, EntityUtil.getNextStartTime(start,
-                frequency, tz, now));
-    }
-
-    @Test
-    public void testgetNextStartTimeOld() throws Exception {
-        Date now = getDate("2012-05-02 02:45 UTC");
-        Date start = getDate("2012-02-01 03:00 UTC");
-        Date newStart = getDate("2012-05-02 03:00 UTC");
-
-        Frequency frequency = new Frequency("days(7)");
-        Assert.assertEquals(newStart, EntityUtil.getNextStartTime(start,
-                frequency, tz, now));
-    }
-
-    @Test
-    public void testGetNextStartTime2() throws Exception {
-        Date now = getDate("2010-05-02 04:45 UTC");
-        Date start = getDate("2010-02-01 03:00 UTC");
-        Date newStart = getDate("2010-05-03 03:00 UTC");
-
-        Frequency frequency = new Frequency("days(7)");
-        Assert.assertEquals(newStart, EntityUtil.getNextStartTime(start,
-                frequency, tz, now));
-    }
-
-    @Test
-    public void testGetNextStartTime3() throws Exception {
-        Date now = getDate("2010-05-02 04:45 UTC");
-        Date start = getDate("1980-02-01 03:00 UTC");
-        Date newStart = getDate("2010-05-07 03:00 UTC");
-
-        Frequency frequency = new Frequency("days(7)");
-        Assert.assertEquals(newStart, EntityUtil.getNextStartTime(start,
-                frequency, tz, now));
-    }
-
-
-    @Test
-    public void testGetInstanceSequence() throws Exception {
-        Date instance = getDate("2012-05-22 13:40 UTC");
-        Date start = getDate("2012-05-14 07:40 UTC");
-
-        Frequency frequency = new Frequency("hours(1)");
-        Assert.assertEquals(199, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence1() throws Exception {
-        Date instance = getDate("2012-05-22 12:40 UTC");
-        Date start = getDate("2012-05-14 07:40 UTC");
-
-        Frequency frequency = Frequency.fromString("hours(1)");
-        Assert.assertEquals(198, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence2() throws Exception {
-        Date instance = getDate("2012-05-22 12:41 UTC");
-        Date start = getDate("2012-05-14 07:40 UTC");
-
-        Frequency frequency = Frequency.fromString("hours(1)");
-        Assert.assertEquals(199, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence3() throws Exception {
-        Date instance = getDate("2010-01-02 01:01 UTC");
-        Date start = getDate("2010-01-02 01:00 UTC");
-
-        Frequency frequency = Frequency.fromString("minutes(1)");
-        Assert.assertEquals(2, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence4() throws Exception {
-        Date instance = getDate("2010-01-01 01:03 UTC");
-        Date start = getDate("2010-01-01 01:01 UTC");
-
-        Frequency frequency = Frequency.fromString("minutes(2)");
-        Assert.assertEquals(2, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence5() throws Exception {
-        Date instance = getDate("2010-01-01 02:01 UTC");
-        Date start = getDate("2010-01-01 01:01 UTC");
-
-        Frequency frequency = Frequency.fromString("hours(1)");
-        Assert.assertEquals(2, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence6() throws Exception {
-        Date instance = getDate("2010-01-01 01:04 UTC");
-        Date start = getDate("2010-01-01 01:01 UTC");
-
-        Frequency frequency = Frequency.fromString("minutes(3)");
-        Assert.assertEquals(2, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetInstanceSequence7() throws Exception {
-        Date instance = getDate("2010-01-01 01:03 UTC");
-        Date start = getDate("2010-01-01 01:01 UTC");
-
-        Frequency frequency = Frequency.fromString("minutes(1)");
-        Assert.assertEquals(3, EntityUtil.getInstanceSequence(start,
-                frequency, tz, instance));
-    }
-
-    @Test
-    public void testGetNextStartTimeMonthly() throws Exception {
-        Date startDate = getDate("2012-06-02 10:00 UTC");
-        Date nextAfter = getDate("2136-06-02 10:00 UTC");
-        Frequency frequency = Frequency.fromString("months(1)");
-        Date expectedResult = nextAfter;
-        Date result = EntityUtil.getNextStartTime(startDate, frequency, tz, nextAfter);
-        Assert.assertEquals(result, expectedResult);
-    }
-
-    @Test
-    public void testGetEntityStartEndDates() throws Exception {
-        Process process = (Process) EntityType.PROCESS.getUnmarshaller().unmarshal(
-                getClass().getResourceAsStream(PROCESS_XML));
-
-        Cluster cluster = new Cluster();
-        cluster.setName("testCluster");
-        cluster.setValidity(process.getClusters().getClusters().get(0).getValidity());
-
-        process.getClusters().getClusters().add(cluster);
-
-        Date expectedStartDate = new SimpleDateFormat("yyyy-MM-dd z").parse("2011-11-02 UTC");
-        Date expectedEndDate = new SimpleDateFormat("yyyy-MM-dd z").parse("2091-12-30 UTC");
-
-        Pair<Date, Date> startEndDates = EntityUtil.getEntityStartEndDates(process);
-        Assert.assertEquals(startEndDates.first, expectedStartDate);
-        Assert.assertEquals(startEndDates.second, expectedEndDate);
-    }
-
-    @Test
-    public void testGetFeedProperties() {
-        Feed feed = new Feed();
-        org.apache.falcon.entity.v0.feed.Properties props = new org.apache.falcon.entity.v0.feed.Properties();
-        Property queue = new Property();
-        String name = "Q";
-        String value = "head of Q division!";
-        queue.setName(name);
-        queue.setValue(value);
-        props.getProperties().add(queue);
-        feed.setProperties(props);
-        Properties actual = EntityUtil.getEntityProperties(feed);
-        Assert.assertEquals(actual.size(), 1);
-        Assert.assertEquals(actual.getProperty(name), value);
-    }
-
-    @Test
-    public void testGetClusterProperties() {
-        org.apache.falcon.entity.v0.cluster.Cluster cluster = new org.apache.falcon.entity.v0.cluster.Cluster();
-        org.apache.falcon.entity.v0.cluster.Properties props = new org.apache.falcon.entity.v0.cluster.Properties();
-        org.apache.falcon.entity.v0.cluster.Property priority = new org.apache.falcon.entity.v0.cluster.Property();
-        String name = "priority";
-        String value = "Sister of Moriarity!";
-        priority.setName(name);
-        priority.setValue(value);
-        props.getProperties().add(priority);
-        cluster.setProperties(props);
-        Properties actual = EntityUtil.getEntityProperties(cluster);
-        Assert.assertEquals(actual.size(), 1);
-        Assert.assertEquals(actual.getProperty(name), value);
-    }
-
-    @Test
-    public void testGetProcessProperties() {
-        Process process = new Process();
-        org.apache.falcon.entity.v0.process.Properties props = new org.apache.falcon.entity.v0.process.Properties();
-        org.apache.falcon.entity.v0.process.Property priority = new org.apache.falcon.entity.v0.process.Property();
-        String name = "M";
-        String value = "Minions!";
-        priority.setName(name);
-        priority.setValue(value);
-        props.getProperties().add(priority);
-        process.setProperties(props);
-        Properties actual = EntityUtil.getEntityProperties(process);
-        Assert.assertEquals(actual.size(), 1);
-        Assert.assertEquals(actual.getProperty(name), value);
-
-    }
-
-    @Test
-    public void testGetLateProcessFeed() throws FalconException {
-        Feed feed = new Feed();
-
-        Assert.assertNull(EntityUtil.getLateProcess(feed));
-        LateArrival lateArrival = new LateArrival();
-        lateArrival.setCutOff(Frequency.fromString("days(1)"));
-        feed.setLateArrival(lateArrival);
-        Assert.assertNotNull(EntityUtil.getLateProcess(feed));
-    }
-
-    @Test(dataProvider = "NextInstanceExpressions")
-    public void testGetNextInstances(String instanceTimeStr, String frequencyStr, int instanceIncrementCount,
-                                     String expectedInstanceTimeStr) throws Exception {
-
-        Date instanceTime = getDate(instanceTimeStr);
-        Frequency frequency = Frequency.fromString(frequencyStr);
-
-        Date nextInstanceTime = EntityUtil.getNextInstanceTime(instanceTime, frequency, tz, instanceIncrementCount);
-
-        Assert.assertEquals(nextInstanceTime, getDate(expectedInstanceTimeStr));
-
-    }
-
-    @DataProvider(name = "NextInstanceExpressions")
-    public Object[][] nextInstanceExpressions() throws ParseException {
-        String instanceTimeStr = "2014-01-01 00:00 UTC";
-        return new Object[][] {
-            {instanceTimeStr, "minutes(1)", 1, "2014-01-01 00:01 UTC"},
-            {instanceTimeStr, "minutes(1)", 25, "2014-01-01 00:25 UTC"},
-
-            {instanceTimeStr, "hours(1)", 1, "2014-01-01 01:00 UTC"},
-            {instanceTimeStr, "hours(1)", 5, "2014-01-01 05:00 UTC"},
-
-            {instanceTimeStr, "days(1)", 1, "2014-01-02 00:00 UTC"},
-            {instanceTimeStr, "days(1)", 10, "2014-01-11 00:00 UTC"},
-
-            {instanceTimeStr, "months(1)", 1, "2014-02-01 00:00 UTC"},
-            {instanceTimeStr, "months(1)", 7, "2014-08-01 00:00 UTC"},
-        };
-    }
-
-    @Test(dataProvider = "bundlePaths")
-    public void testIsStagingPath(Path path, boolean createPath, boolean expected) throws Exception {
-        ClusterEntityParser parser = (ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER);
-        InputStream stream = this.getClass().getResourceAsStream(CLUSTER_XML);
-        org.apache.falcon.entity.v0.cluster.Cluster cluster = parser.parse(stream);
-
-        ProcessEntityParser processParser = (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-        stream = this.getClass().getResourceAsStream(PROCESS_XML);
-        Process process = processParser.parse(stream);
-
-        FileSystem fs = HadoopClientFactory.get().
-                createFalconFileSystem(ClusterHelper.getConfiguration(cluster));
-        if (createPath && !fs.exists(path)) {
-            fs.create(path);
-        }
-
-        Assert.assertEquals(EntityUtil.isStagingPath(cluster, process, path), expected);
-    }
-
-    @DataProvider(name = "bundlePaths")
-    public Object[][] getBundlePaths() {
-        return new Object[][] {
-            {new Path("/projects/falcon/staging/ivory/workflows/process/sample/"), true, true},
-            {new Path("/projects/falcon/staging/falcon/workflows/process/sample/"), true, true},
-            {new Path("/projects/abc/falcon/workflows/process/sample/"), true, false},
-            {new Path("/projects/falcon/staging/falcon/workflows/process/test-process/"), false, false},
-            {new Path("/projects/falcon/staging/falcon/workflows/process/test-process/"), true, false},
-        };
-    }
-
-    @Test
-    public void testStringToProps() {
-        String testPropsString = "key1:value1,key2 : value2 , key3: value3, key4:value4:test";
-        Map<String, String> props = EntityUtil.getPropertyMap(testPropsString);
-        Assert.assertEquals(props.size(), 4);
-        for (int i = 1; i <= 3; i++) {
-            Assert.assertEquals(props.get("key" + i), "value" + i);
-        }
-        Assert.assertEquals(props.get("key4"), "value4:test");
-    }
-
-    @Test (expectedExceptions = IllegalArgumentException.class,
-            expectedExceptionsMessageRegExp = "Found invalid property .*",
-            dataProvider = "InvalidProps")
-    public void testInvalidStringToProps(String propString) {
-        // Invalid property strings are supplied by the "InvalidProps" data provider below.
-        EntityUtil.getPropertyMap(propString);
-    }
-
-    @DataProvider(name = "InvalidProps")
-    public Object[][] getInvalidProps() {
-        return new Object[][]{
-            {"key1"},
-            {"key1=value1"},
-            {"key1:value1,key2=value2"},
-            {":value"},
-        };
-    }
-
-    @Test
-    public void testGetLatestStagingPath() throws FalconException, IOException {
-        ClusterEntityParser parser = (ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER);
-        InputStream stream = this.getClass().getResourceAsStream(CLUSTER_XML);
-        org.apache.falcon.entity.v0.cluster.Cluster cluster = parser.parse(stream);
-
-        ProcessEntityParser processParser = (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
-        stream = this.getClass().getResourceAsStream(PROCESS_XML);
-        Process process = processParser.parse(stream);
-        process.setName("staging-test");
-
-        String md5 = EntityUtil.md5(EntityUtil.getClusterView(process, "testCluster"));
-        FileSystem fs = HadoopClientFactory.get().
-                createFalconFileSystem(ClusterHelper.getConfiguration(cluster));
-
-        String basePath = "/projects/falcon/staging/falcon/workflows/process/staging-test/";
-        Path[] paths = {
-            new Path(basePath + "5a8100dc460b44db2e7bfab84b24cb92_1436441045003"),
-            new Path(basePath + "6b3a1b6c7cf9de62c78b125415ffb70c_1436504488677"),
-            new Path(basePath + md5 + "_1436344303117"),
-            new Path(basePath + md5 + "_1436347924846"),
-            new Path(basePath + md5 + "_1436357052992"),
-            new Path(basePath + "logs"),
-            new Path(basePath + "random_dir"),
-        };
-
-        // Ensure exception is thrown when there are no staging dirs.
-        fs.delete(new Path(basePath), true);
-        try {
-            EntityUtil.getLatestStagingPath(cluster, process);
-            Assert.fail("Exception expected");
-        } catch (FalconException e) {
-            // Do nothing
-        }
-
-        // Now create paths
-        for (Path path : paths) {
-            fs.create(path);
-        }
-
-        // Ensure latest is returned.
-        Assert.assertEquals(EntityUtil.getLatestStagingPath(cluster, process).getName(), md5 + "_1436357052992");
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/FeedDataPathTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/FeedDataPathTest.java b/common/src/test/java/org/apache/falcon/entity/FeedDataPathTest.java
deleted file mode 100644
index 4c293bb..0000000
--- a/common/src/test/java/org/apache/falcon/entity/FeedDataPathTest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-/**
- * Tests for the value patterns of feed data path variables (minute, hour, day, month, year).
- */
-public class FeedDataPathTest {
-
-    @Test
-    public void testMinutesRegularExpression() {
-        String minutePattern = FeedDataPath.VARS.MINUTE.getValuePattern();
-        Assert.assertFalse("0".matches(minutePattern));
-        Assert.assertFalse("1".matches(minutePattern));
-        Assert.assertFalse("61".matches(minutePattern));
-        Assert.assertFalse("010".matches(minutePattern));
-        Assert.assertFalse("10 ".matches(minutePattern));
-        Assert.assertFalse(" 10".matches(minutePattern));
-
-
-        Assert.assertTrue("00".matches(minutePattern));
-        Assert.assertTrue("01".matches(minutePattern));
-        Assert.assertTrue("60".matches(minutePattern));
-    }
-
-    @Test
-    public void testHourRegularExpression() {
-        String hourPattern = FeedDataPath.VARS.HOUR.getValuePattern();
-        Assert.assertFalse("0".matches(hourPattern));
-        Assert.assertFalse("1".matches(hourPattern));
-        Assert.assertFalse("2".matches(hourPattern));
-        Assert.assertFalse("25".matches(hourPattern));
-        Assert.assertFalse("29".matches(hourPattern));
-        Assert.assertFalse("010".matches(hourPattern));
-        Assert.assertFalse("10 ".matches(hourPattern));
-        Assert.assertFalse(" 10".matches(hourPattern));
-
-
-        Assert.assertTrue("00".matches(hourPattern));
-        Assert.assertTrue("01".matches(hourPattern));
-        Assert.assertTrue("24".matches(hourPattern));
-        Assert.assertTrue("10".matches(hourPattern));
-        Assert.assertTrue("19".matches(hourPattern));
-        Assert.assertTrue("12".matches(hourPattern));
-    }
-
-
-    @Test
-    public void testDayRegularExpression() {
-        String dayPattern = FeedDataPath.VARS.DAY.getValuePattern();
-        Assert.assertFalse("0".matches(dayPattern));
-        Assert.assertFalse("1".matches(dayPattern));
-        Assert.assertFalse("32".matches(dayPattern));
-        Assert.assertFalse("00".matches(dayPattern));
-        Assert.assertFalse("010".matches(dayPattern));
-        Assert.assertFalse("10 ".matches(dayPattern));
-        Assert.assertFalse(" 10".matches(dayPattern));
-
-
-        Assert.assertTrue("01".matches(dayPattern));
-        Assert.assertTrue("10".matches(dayPattern));
-        Assert.assertTrue("29".matches(dayPattern));
-        Assert.assertTrue("30".matches(dayPattern));
-        Assert.assertTrue("31".matches(dayPattern));
-    }
-
-    @Test
-    public void testMonthRegularExpression() {
-        String monthPattern = FeedDataPath.VARS.MONTH.getValuePattern();
-        Assert.assertFalse("0".matches(monthPattern));
-        Assert.assertFalse("1".matches(monthPattern));
-        Assert.assertFalse("13".matches(monthPattern));
-        Assert.assertFalse("19".matches(monthPattern));
-        Assert.assertFalse("00".matches(monthPattern));
-        Assert.assertFalse("010".matches(monthPattern));
-        Assert.assertFalse("10 ".matches(monthPattern));
-        Assert.assertFalse(" 10".matches(monthPattern));
-
-
-        Assert.assertTrue("01".matches(monthPattern));
-        Assert.assertTrue("02".matches(monthPattern));
-        Assert.assertTrue("10".matches(monthPattern));
-        Assert.assertTrue("12".matches(monthPattern));
-    }
-
-    @Test
-    public void testYearRegularExpression() {
-        String yearPattern = FeedDataPath.VARS.YEAR.getValuePattern();
-        Assert.assertFalse("0".matches(yearPattern));
-        Assert.assertFalse("1".matches(yearPattern));
-        Assert.assertFalse("13".matches(yearPattern));
-        Assert.assertFalse("19".matches(yearPattern));
-        Assert.assertFalse("00".matches(yearPattern));
-        Assert.assertFalse("010".matches(yearPattern));
-        Assert.assertFalse("10 ".matches(yearPattern));
-        Assert.assertFalse(" 10".matches(yearPattern));
-
-
-        Assert.assertTrue("0001".matches(yearPattern));
-        Assert.assertTrue("2014".matches(yearPattern));
-    }
-
-
-}


[49/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/build-tools/src/patches/OOZIE-1551-4.0.patch
----------------------------------------------------------------------
diff --git a/build-tools/src/patches/OOZIE-1551-4.0.patch b/build-tools/src/patches/OOZIE-1551-4.0.patch
deleted file mode 100644
index 965d475..0000000
--- a/build-tools/src/patches/OOZIE-1551-4.0.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-diff --git a/hadooplibs/hadoop-2/pom.xml b/hadooplibs/hadoop-2/pom.xml
-index 83c209e..e70847e 100644
---- a/hadooplibs/hadoop-2/pom.xml
-+++ b/hadooplibs/hadoop-2/pom.xml
-@@ -27,7 +27,7 @@
-     </parent>
-     <groupId>org.apache.oozie</groupId>
-     <artifactId>oozie-hadoop</artifactId>
--    <version>2.2.0-SNAPSHOT.oozie-4.0.0-falcon</version>
-+    <version>2.2.0.oozie-4.0.0-falcon</version>
-     <description>Apache Oozie Hadoop ${project.version}</description>
-     <name>Apache Oozie Hadoop ${project.version}</name>
-     <packaging>jar</packaging>
-@@ -36,7 +36,7 @@
-         <dependency>
-             <groupId>org.apache.hadoop</groupId>
-             <artifactId>hadoop-client</artifactId>
--            <version>2.2.0-SNAPSHOT</version>
-+            <version>2.2.0</version>
-             <scope>compile</scope>
-         </dependency>
-     </dependencies>
-diff --git a/hadooplibs/hadoop-distcp-2/pom.xml b/hadooplibs/hadoop-distcp-2/pom.xml
-index b9948fb..d60c561 100644
---- a/hadooplibs/hadoop-distcp-2/pom.xml
-+++ b/hadooplibs/hadoop-distcp-2/pom.xml
-@@ -27,7 +27,7 @@
-     </parent>
-     <groupId>org.apache.oozie</groupId>
-     <artifactId>oozie-hadoop-distcp</artifactId>
--    <version>2.2.0-SNAPSHOT.oozie-4.0.0-falcon</version>
-+    <version>2.2.0.oozie-4.0.0-falcon</version>
-     <description>Apache Oozie Hadoop Distcp ${project.version}</description>
-     <name>Apache Oozie Hadoop Distcp ${project.version}</name>
-     <packaging>jar</packaging>
-@@ -36,7 +36,7 @@
-         <dependency>
-             <groupId>org.apache.hadoop</groupId>
-             <artifactId>hadoop-distcp</artifactId>
--            <version>2.2.0-SNAPSHOT</version>
-+            <version>2.2.0</version>
-             <scope>compile</scope>
-         </dependency>
-     </dependencies>
-diff --git a/hadooplibs/hadoop-test-2/pom.xml b/hadooplibs/hadoop-test-2/pom.xml
-index 499871a..3af7e9f 100644
---- a/hadooplibs/hadoop-test-2/pom.xml
-+++ b/hadooplibs/hadoop-test-2/pom.xml
-@@ -27,7 +27,7 @@
-     </parent>
-     <groupId>org.apache.oozie</groupId>
-     <artifactId>oozie-hadoop-test</artifactId>
--    <version>2.2.0-SNAPSHOT.oozie-4.0.0-falcon</version>
-+    <version>2.2.0.oozie-4.0.0-falcon</version>
-     <description>Apache Oozie Hadoop ${project.version} Test</description>
-     <name>Apache Oozie Hadoop ${project.version} Test</name>
-     <packaging>jar</packaging>
-@@ -36,7 +36,7 @@
-         <dependency>
-             <groupId>org.apache.hadoop</groupId>
-             <artifactId>hadoop-minicluster</artifactId>
--            <version>2.2.0-SNAPSHOT</version>
-+            <version>2.2.0</version>
-             <scope>compile</scope>
-         </dependency>
-     </dependencies>
-diff --git a/pom.xml b/pom.xml
-index 73cedcf..f8fa3b4 100644
---- a/pom.xml
-+++ b/pom.xml
-@@ -1002,8 +1002,8 @@
-                 <activeByDefault>false</activeByDefault>
-             </activation>
-             <properties>
--               <hadoop.version>2.2.0-SNAPSHOT</hadoop.version>
--               <hadoop.auth.version>2.2.0-SNAPSHOT</hadoop.auth.version>
-+               <hadoop.version>2.2.0</hadoop.version>
-+               <hadoop.auth.version>2.2.0</hadoop.auth.version>
-                <pig.classifier>h2</pig.classifier>
-                <sqoop.classifier>hadoop200</sqoop.classifier>
-             </properties>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/build-tools/src/patches/OOZIE-1741.patch
----------------------------------------------------------------------
diff --git a/build-tools/src/patches/OOZIE-1741.patch b/build-tools/src/patches/OOZIE-1741.patch
deleted file mode 100644
index e69b2d9..0000000
--- a/build-tools/src/patches/OOZIE-1741.patch
+++ /dev/null
@@ -1,397 +0,0 @@
-diff --git core/src/main/java/org/apache/oozie/coord/HCatELFunctions.java core/src/main/java/org/apache/oozie/coord/HCatELFunctions.java
-index e5f0146..9a36af0 100644
---- core/src/main/java/org/apache/oozie/coord/HCatELFunctions.java
-+++ core/src/main/java/org/apache/oozie/coord/HCatELFunctions.java
-@@ -115,6 +115,12 @@ public class HCatELFunctions {
-         return echoUnResolved("dataOutPartitions", "'" + dataOutName + "'");
-     }
-
-+    public static String ph1_coord_dataInPartitions_echo(String dataInName, String type) {
-+        // Checking if the dataIn/dataOut is correct?
-+        isValidDataEvent(dataInName);
-+        return echoUnResolved("dataInPartitions", "'" + dataInName + "', '" + type + "'");
-+    }
-+
-     public static String ph1_coord_dataOutPartitionValue_echo(String dataOutName, String partition) {
-         // Checking if the dataIn/dataOut is correct?
-         isValidDataEvent(dataOutName);
-@@ -266,6 +272,47 @@ public class HCatELFunctions {
-     }
-
-     /**
-+     * Used to specify the entire HCat partition defining input for workflow job. <p/> Look for two evaluator-level
-+     * variables <p/> A) .datain.<DATAIN_NAME> B) .datain.<DATAIN_NAME>.unresolved <p/> A defines the data-in HCat URI.
-+     * <p/> B defines whether there are any unresolved EL-function (i.e latest) <p/> If there are something unresolved,
-+     * this function will echo back the original function <p/> otherwise it sends the partition.
-+     *
-+     * @param dataInName : DataIn name
-+     * @param type : for action type: hive-export
-+     */
-+    public static String ph3_coord_dataInPartitions(String dataInName, String type) {
-+        ELEvaluator eval = ELEvaluator.getCurrent();
-+        String uri = (String) eval.getVariable(".datain." + dataInName);
-+        Boolean unresolved = (Boolean) eval.getVariable(".datain." + dataInName + ".unresolved");
-+        if (unresolved != null && unresolved.booleanValue() == true) {
-+            return "${coord:dataInPartitions('" + dataInName + "', '" + type + "')}";
-+        }
-+        String partitionValue = null;
-+        if (uri != null) {
-+            if (type.equals("hive-export")) {
-+                String[] uriList = uri.split(CoordELFunctions.DIR_SEPARATOR);
-+                if (uriList.length > 1) {
-+                    throw new RuntimeException("Multiple partitions not supported for hive-export type. Dataset name: "
-+                        + dataInName + " URI: " + uri);
-+                }
-+                try {
-+                    partitionValue = new HCatURI(uri).toPartitionValueString(type);
-+                }
-+                catch (URISyntaxException e) {
-+                    throw new RuntimeException("Parsing exception for HCatURI " + uri, e);
-+                }
-+            } else {
-+                  throw new RuntimeException("Unsupported type: " + type + " dataset name: " + dataInName);
-+            }
-+        }
-+        else {
-+            XLog.getLog(HCatELFunctions.class).warn("URI is null");
-+            return null;
-+        }
-+        return partitionValue;
-+    }
-+
-+    /**
-      * Used to specify the MAXIMUM value of an HCat partition which is input dependency for workflow job.<p/> Look for two evaluator-level
-      * variables <p/> A) .datain.<DATAIN_NAME> B) .datain.<DATAIN_NAME>.unresolved <p/> A defines the current list of
-      * HCat URIs. <p/> B defines whether there are any unresolved EL-function (i.e latest) <p/> If there are something
-diff --git core/src/main/resources/oozie-default.xml core/src/main/resources/oozie-default.xml
-index 455ef9d..889f10d 100644
---- core/src/main/resources/oozie-default.xml
-+++ core/src/main/resources/oozie-default.xml
-@@ -837,6 +837,7 @@
-             coord:dataInPartitionFilter=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitionFilter_echo,
-             coord:dataInPartitionMin=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitionMin_echo,
-             coord:dataInPartitionMax=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitionMax_echo,
-+            coord:dataInPartitions=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitions_echo,
-             coord:dataOutPartitions=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataOutPartitions_echo,
-             coord:dataOutPartitionValue=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataOutPartitionValue_echo
-         </value>
-@@ -1101,6 +1102,7 @@
-             coord:dataInPartitionFilter=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitionFilter,
-             coord:dataInPartitionMin=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitionMin,
-             coord:dataInPartitionMax=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitionMax,
-+            coord:dataInPartitions=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitions,
-             coord:dataOutPartitions=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataOutPartitions,
-             coord:dataOutPartitionValue=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataOutPartitionValue
-         </value>
-diff --git core/src/test/java/org/apache/oozie/coord/TestHCatELFunctions.java core/src/test/java/org/apache/oozie/coord/TestHCatELFunctions.java
-index f46f1ec..fac2177 100644
---- core/src/test/java/org/apache/oozie/coord/TestHCatELFunctions.java
-+++ core/src/test/java/org/apache/oozie/coord/TestHCatELFunctions.java
-@@ -264,6 +264,38 @@ public class TestHCatELFunctions extends XHCatTestCase {
-     }
-
-     /**
-+     * Test HCat dataInPartitions EL function (phase 1) which echoes back the EL
-+     * function itself
-+     *
-+     * @throws Exception
-+     */
-+    @Test
-+    public void testDataInPartitionsPh1() throws Exception {
-+        init("coord-job-submit-data");
-+        String expr = "${coord:dataInPartitions('ABC', 'hive-export')}";
-+        // +ve test
-+        eval.setVariable("oozie.dataname.ABC", "data-in");
-+        assertEquals("${coord:dataInPartitions('ABC', 'hive-export')}", CoordELFunctions.evalAndWrap(eval, expr));
-+        // -ve test
-+        expr = "${coord:dataInPartitions('ABCD', 'hive-export')}";
-+        try {
-+            CoordELFunctions.evalAndWrap(eval, expr);
-+            fail("should throw exception because Data-in is not defined");
-+        }
-+        catch (Exception ex) {
-+        }
-+        // -ve test
-+        expr = "${coord:dataInPartitions('ABCD')}";
-+        eval.setVariable("oozie.dataname.ABCD", "data-in");
-+        try {
-+            CoordELFunctions.evalAndWrap(eval, expr);
-+            fail("should throw exception because EL function requires 2 parameters");
-+        }
-+        catch (Exception ex) {
-+        }
-+    }
-+
-+    /**
-      * Test HCat dataOutPartition EL function (phase 1) which echoes back the EL
-      * function itself
-      *
-@@ -463,6 +495,31 @@ public class TestHCatELFunctions extends XHCatTestCase {
-         assertTrue(res.equals("20"));
-     }
-
-+    /**
-+     * Test dataInPartitions EL function (phase 3) which returns the complete partition value string of a single partition
-+     * for the 'hive-export' type.
-+     *
-+     * @throws Exception
-+     */
-+    @Test
-+    public void testDataInPartitions() throws Exception {
-+        init("coord-action-start");
-+        String expr = "${coord:dataInPartitions('ABC', 'hive-export')}";
-+        eval.setVariable(".datain.ABC", "hcat://hcat.server.com:5080/mydb/clicks/datastamp=20120230;region=us");
-+        eval.setVariable(".datain.ABC.unresolved", Boolean.FALSE);
-+        String res = CoordELFunctions.evalAndWrap(eval, expr);
-+        assertTrue(res.equals("datastamp='20120230',region='us'") || res.equals("region='us',datastamp='20120230'"));
-+        // -ve test; execute EL function with any other type than hive-export
-+        try {
-+            expr = "${coord:dataInPartitions('ABC', 'invalid-type')}";
-+            eval.setVariable(".datain.ABC", "hcat://hcat.server.com:5080/mydb/clicks/datastamp=20120230;region=us");
-+            eval.setVariable(".datain.ABC.unresolved", Boolean.FALSE);
-+            res = CoordELFunctions.evalAndWrap(eval, expr);
-+            fail("EL function should throw exception because of invalid type");
-+        } catch (Exception e) {
-+        }
-+    }
-+
-     private void init(String tag) throws Exception {
-         init(tag, "hdfs://localhost:9000/user/" + getTestUser() + "/US/${YEAR}/${MONTH}/${DAY}");
-     }
-diff --git docs/src/site/twiki/CoordinatorFunctionalSpec.twiki docs/src/site/twiki/CoordinatorFunctionalSpec.twiki
-index a5ecbc5..621bd3d 100644
---- docs/src/site/twiki/CoordinatorFunctionalSpec.twiki
-+++ docs/src/site/twiki/CoordinatorFunctionalSpec.twiki
-@@ -2608,6 +2608,192 @@ C = foreach B generate foo, bar;
- store C into 'myOutputDatabase.myOutputTable' using org.apache.hcatalog.pig.HCatStorer('region=APAC,datestamp=20090102');
- </blockquote>
-
-+---++++ 6.8.8 coord:dataInPartitions(String name, String type) EL function
-+
-+The =${coord:dataInPartitions(String name, String type)}= EL function resolves to a list of partition key-value
-+pairs for the input-event dataset. Currently 'hive-export' is the only supported type. The 'hive-export' type
-+supports a single partition instance, and the result can be used to build the complete partition value string
-+for use in a Hive query for partition export/import.
-+
-+The example below illustrates a Hive export-import job triggered by a coordinator, using the EL functions for the HCat
-+database, table, and input partitions. The example replicates hourly processed data across Hive tables.
-+
-+*%GREEN% Example: %ENDCOLOR%*
-+
-+#HCatHiveExampleOne
-+
-+*Coordinator application definition:*
-+
-+<blockquote>
-+    <coordinator-app xmlns="uri:oozie:coordinator:0.3" name="app-coord"
-+    frequency="${coord:hours(1)}" start="2014-03-28T08:00Z"
-+    end="2030-01-01T00:00Z" timezone="UTC">
-+
-+    <datasets>
-+        <dataset name="Stats-1" frequency="${coord:hours(1)}"
-+            initial-instance="2014-03-28T08:00Z" timezone="UTC">
-+            <uri-template>hcat://foo:11002/myInputDatabase1/myInputTable1/year=${YEAR};month=${MONTH};day=${DAY};hour=${HOUR}
-+            </uri-template>
-+        </dataset>
-+        <dataset name="Stats-2" frequency="${coord:hours(1)}"
-+            initial-instance="2014-03-28T08:00Z" timezone="UTC">
-+            <uri-template>hcat://foo:11002/myInputDatabase2/myInputTable2/year=${YEAR};month=${MONTH};day=${DAY};hour=${HOUR}
-+            </uri-template>
-+        </dataset>
-+    </datasets>
-+    <input-events>
-+        <data-in name="processed-logs-1" dataset="Stats-1">
-+            <instance>${coord:current(0)}</instance>
-+        </data-in>
-+    </input-events>
-+    <output-events>
-+        <data-out name="processed-logs-2" dataset="Stats-2">
-+            <instance>${coord:current(0)}</instance>
-+        </data-out>
-+    </output-events>
-+    <action>
-+      <workflow>
-+        <app-path>hdfs://bar:8020/usr/joe/logsreplicator-wf</app-path>
-+        <configuration>
-+          <property>
-+            <name>EXPORT_DB</name>
-+            <value>${coord:databaseIn('processed-logs-1')}</value>
-+          </property>
-+          <property>
-+            <name>EXPORT_TABLE</name>
-+            <value>${coord:tableIn('processed-logs-1')}</value>
-+          </property>
-+          <property>
-+            <name>IMPORT_DB</name>
-+            <value>${coord:databaseOut('processed-logs-2')}</value>
-+          </property>
-+          <property>
-+            <name>IMPORT_TABLE</name>
-+            <value>${coord:tableOut('processed-logs-2')}</value>
-+          </property>
-+          <property>
-+            <name>EXPORT_PARTITION</name>
-+            <value>${coord:dataInPartitions('processed-logs-1', 'hive-export')}</value>
-+          </property>
-+          <property>
-+            <name>EXPORT_PATH</name>
-+            <value>hdfs://bar:8020/staging/${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd-HH')}/data</value>
-+          </property>
-+        </configuration>
-+      </workflow>
-+    </action>
-+</coordinator-app>
-+</blockquote>
-+
-+Parameterizing the input/output databases and tables using the corresponding EL functions as shown makes them
-+available in the Hive actions of the workflow 'logsreplicator-wf'.
-+
-+Each coordinator action will use as input events the hourly instances of the 'processed-logs-1' dataset. The
-+=${coord:dataInPartitions(String name, String type)}= function enables the coordinator application to pass the
-+partition corresponding to each hourly dataset instance to the workflow job triggered by the coordinator action.
-+The workflow passes this partition value to the Hive export script, which exports the hourly partition from the source
-+database to the staging location referred to as =EXPORT_PATH=. The Hive import script then imports the hourly partition
-+from the =EXPORT_PATH= staging location into the target database.
-+
-+#HCatWorkflow
-+
-+*Workflow definition:*
-+
-+<blockquote>
-+<workflow-app xmlns="uri:oozie:workflow:0.3" name="logsreplicator-wf">
-+    <start to="table-export"/>
-+    <action name="table-export">
-+        <hive:hive xmlns:hive="uri:oozie:hive-action:0.2" xmlns="uri:oozie:hive-action:0.2">
-+            <job-tracker>${jobTracker}</job-tracker>
-+            <name-node>${nameNode}</name-node>
-+            <job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
-+            <configuration>
-+                <property>
-+                    <name>mapred.job.queue.name</name>
-+                    <value>${queueName}</value>
-+                </property>
-+                <property>
-+                    <name>oozie.launcher.mapred.job.priority</name>
-+                    <value>${jobPriority}</value>
-+                </property>
-+            </configuration>
-+            <script>${wf:appPath()}/scripts/table-export.hql</script>
-+            <param>sourceDatabase=${EXPORT_DB}</param>
-+            <param>sourceTable=${EXPORT_TABLE}</param>
-+            <param>sourcePartition=${EXPORT_PARTITION}</param>
-+            <param>sourceStagingDir=${EXPORT_PATH}</param>
-+        </hive:hive>
-+        <ok to="table-import"/>
-+        <error to="fail"/>
-+    </action>
-+    <action name="table-import">
-+        <hive:hive xmlns:hive="uri:oozie:hive-action:0.2" xmlns="uri:oozie:hive-action:0.2">
-+            <job-tracker>${jobTracker}</job-tracker>
-+            <name-node>${nameNode}</name-node>
-+            <job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
-+            <configuration>
-+                <property>
-+                    <name>mapred.job.queue.name</name>
-+                    <value>${queueName}</value>
-+                </property>
-+                <property>
-+                    <name>oozie.launcher.mapred.job.priority</name>
-+                    <value>${jobPriority}</value>
-+                </property>
-+            </configuration>
-+            <script>${wf:appPath()}/scripts/table-import.hql</script>
-+            <param>targetDatabase=${IMPORT_DB}</param>
-+            <param>targetTable=${IMPORT_TABLE}</param>
-+            <param>targetPartition=${EXPORT_PARTITION}</param>
-+            <param>sourceStagingDir=${EXPORT_PATH}</param>
-+        </hive:hive>
-+        <ok to="end"/>
-+        <error to="fail"/>
-+    </action>
-+    <kill name="fail">
-+        <message>
-+            Workflow failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
-+        </message>
-+    </kill>
-+    <end name="end"/>
-+</workflow-app>
-+</blockquote>
-+
-+Ensure that the following jars are on the classpath, with versions corresponding to the HCatalog installation:
-+hcatalog-core.jar, webhcat-java-client.jar, hive-common.jar, hive-exec.jar, hive-metastore.jar, hive-serde.jar,
-+libfb303.jar. The hive-site.xml file needs to be on the classpath as well.
-+
-+*Example Hive Export script:*
-+The following script exports a particular Hive table partition into the staging location, where the partition value
-+is computed using the =${coord:dataInPartitions(String name, String type)}= EL function.
-+<blockquote>
-+export table ${sourceDatabase}.${sourceTable} partition (${sourcePartition}) to '${sourceStagingDir}';
-+</blockquote>
-+
-+For example, for the 2014-03-28T08:00Z run with the given dataset instances and
-+=${coord:dataInPartitions('processed-logs-1', 'hive-export')}=, the above Hive script with resolved values would look like:
-+<blockquote>
-+export table myInputDatabase1.myInputTable1 partition (year='2014',month='03',day='28',hour='08') to 'hdfs://bar:8020/staging/2014-03-28-08/data';
-+</blockquote>
-+
-+*Example Hive Import script:*
-+The following script imports a particular Hive table partition from the staging location, where the partition value is
-+computed using the =${coord:dataInPartitions(String name, String type)}= EL function.
-+<blockquote>
-+use ${targetDatabase};
-+alter table ${targetTable} drop if exists partition (${targetPartition});
-+import table ${targetTable} partition (${targetPartition}) from '${sourceStagingDir}';
-+</blockquote>
-+
-+For example, for the 2014-03-28T08:00Z run with the given dataset instances and
-+=${coord:dataInPartitions('processed-logs-1', 'hive-export')}=, the above Hive script with resolved values would look like:
-+
-+<blockquote>
-+use myInputDatabase2;
-+alter table myInputTable2 drop if exists partition (year='2014',month='03',day='28',hour='08');
-+import table myInputTable2 partition (year='2014',month='03',day='28',hour='08') from 'hdfs://bar:8020/staging/2014-03-28-08';
-+</blockquote>
-+
-
- ---+++ 6.9. Parameterization of Coordinator Application
-
-diff --git sharelib/hcatalog/src/main/java/org/apache/oozie/util/HCatURI.java sharelib/hcatalog/src/main/java/org/apache/oozie/util/HCatURI.java
-index d797f9b..4bc5048 100644
---- sharelib/hcatalog/src/main/java/org/apache/oozie/util/HCatURI.java
-+++ sharelib/hcatalog/src/main/java/org/apache/oozie/util/HCatURI.java
-@@ -260,6 +260,35 @@ public class HCatURI {
-         return filter.toString();
-     }
-
-+    /**
-+     * Get the entire partition value string from the partition map.
-+     * For the hive-export type, the result can be used as the partition
-+     * specification in a Hive query for partition export/import.
-+     *
-+     * @param type export type; currently only hive-export is supported
-+     * @return partition value string
-+     */
-+    public String toPartitionValueString(String type) {
-+        StringBuilder value = new StringBuilder();
-+        if (type.equals("hive-export")) {
-+            String comparator = "=";
-+            String separator = ",";
-+            for (Map.Entry<String, String> entry : partitions.entrySet()) {
-+                if (value.length() > 1) {
-+                    value.append(separator);
-+                }
-+                value.append(entry.getKey());
-+                value.append(comparator);
-+                value.append(PARTITION_VALUE_QUOTE);
-+                value.append(entry.getValue());
-+                value.append(PARTITION_VALUE_QUOTE);
-+            }
-+        } else {
-+            throw new RuntimeException("Unsupported type: " + type);
-+        }
-+        return value.toString();
-+    }
-+
-     @Override
-     public String toString() {
-         StringBuilder sb = new StringBuilder();

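To make the removed behaviour above easier to follow, here is a small standalone Java sketch (not the Oozie or Falcon implementation) of the hive-export formatting that the deleted HCatURI#toPartitionValueString method and the coord:dataInPartitions documentation describe; the partition keys and values are taken from the test case above.

import java.util.LinkedHashMap;
import java.util.Map;

// Standalone sketch only: mimics the hive-export partition formatting described
// for HCatURI#toPartitionValueString in the deleted patch above.
public class PartitionSpecSketch {

    static String toHiveExportSpec(Map<String, String> partitions) {
        StringBuilder value = new StringBuilder();
        for (Map.Entry<String, String> entry : partitions.entrySet()) {
            if (value.length() > 0) {
                value.append(',');   // separator between key='value' pairs
            }
            value.append(entry.getKey()).append("='").append(entry.getValue()).append('\'');
        }
        return value.toString();
    }

    public static void main(String[] args) {
        // Partitions of hcat://hcat.server.com:5080/mydb/clicks/datastamp=20120230;region=us
        Map<String, String> parts = new LinkedHashMap<String, String>();
        parts.put("datastamp", "20120230");
        parts.put("region", "us");
        // Prints: datastamp='20120230',region='us' -- the value asserted in testDataInPartitions
        System.out.println(toHiveExportSpec(parts));
    }
}

This is the string that lands in EXPORT_PARTITION and is spliced into the export/import HQL shown in the twiki example.
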
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/build-tools/src/patches/oozie-hadoop2-profile.patch
----------------------------------------------------------------------
diff --git a/build-tools/src/patches/oozie-hadoop2-profile.patch b/build-tools/src/patches/oozie-hadoop2-profile.patch
deleted file mode 100644
index c555f2d..0000000
--- a/build-tools/src/patches/oozie-hadoop2-profile.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/pom.xml b/pom.xml
-index 455a11d..ab4299c 100644
---- a/pom.xml
-+++ b/pom.xml
-@@ -1785,7 +1785,7 @@
-         <profile>
-             <id>hadoop-2</id>
-             <activation>
--                <activeByDefault>false</activeByDefault>
-+                <activeByDefault>true</activeByDefault>
-             </activation>
-             <properties>
-                 <hadoop.version>2.4.0</hadoop.version>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/build-tools/src/patches/oozie-site.patch
----------------------------------------------------------------------
diff --git a/build-tools/src/patches/oozie-site.patch b/build-tools/src/patches/oozie-site.patch
deleted file mode 100644
index a368e7f..0000000
--- a/build-tools/src/patches/oozie-site.patch
+++ /dev/null
@@ -1,165 +0,0 @@
---- ./core/src/main/conf/oozie-site.xml
-+++ ./core/src/main/conf/oozie-site.xml
-@@ -376,4 +376,162 @@
- 
-     -->
- 
-+    <property>
-+        <name>oozie.base.url</name>
-+        <value>https://localhost:41443/oozie</value>
-+        <description>
-+            The Oozie base url.
-+        </description>
-+    </property>
-+
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-+        <value>
-+            now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-+            today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo,
-+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo,
-+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-+            latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-+            future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
-+        </value>
-+        <description>
-+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-+            This property is a convenience property to add extensions to the built in executors without having to
-+            include all the built in ones.
-+        </description>
-+    </property>
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-+        <value>
-+            now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
-+            today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
-+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
-+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek_inst,
-+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek_inst,
-+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
-+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
-+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
-+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
-+            latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-+            future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
-+        </value>
-+        <description>
-+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-+            This property is a convenience property to add extensions to the built in executors without having to
-+            include all the built in ones.
-+        </description>
-+    </property>
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-+        <value>
-+            now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-+            today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek,
-+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek,
-+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-+            latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-+            future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
-+        </value>
-+        <description>
-+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-+            This property is a convenience property to add extensions to the built in executors without having to
-+            include all the built in ones.
-+        </description>
-+    </property>
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-+        <value>
-+            now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-+            today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo,
-+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo,
-+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-+            dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
-+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-+            dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
-+        </value>
-+        <description>
-+            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-+            This property is a convenience property to add extensions to the built in executors without having to
-+            include all the built in ones.
-+        </description>
-+    </property>
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-+        <value>
-+            now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-+            today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek,
-+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek,
-+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-+            latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-+            future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-+            dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
-+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-+            dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
-+        </value>
-+        <description>
-+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-+            This property is a convenience property to add extensions to the built in executors without having to
-+            include all the built in ones.
-+        </description>
-+    </property>
-+
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-+        <value>
-+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
-+        </value>
-+        <description>
-+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-+        </description>
-+    </property>
-+
-+    <property>
-+        <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-+        <value>
-+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
-+        </value>
-+        <description>
-+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-+        </description>
-+    </property>
-+
-+    <property>
-+        <name>oozie.service.HadoopAccessorService.supported.filesystems</name>
-+        <value>hdfs,hftp,webhdfs,jail</value>
-+        <description>
-+            List the different filesystems supported for federation. If the wildcard "*" is specified,
-+            then ALL file schemes will be allowed.
-+        </description>
-+    </property>
-+
- </configuration>
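
All of the properties above use the declaration format [PREFIX:]NAME=CLASS#METHOD mentioned in their descriptions. A minimal illustrative Java sketch (not Oozie or Falcon code) that splits one such entry taken from the patch:

// Illustrative only: splits one [PREFIX:]NAME=CLASS#METHOD declaration entry.
public class ElDeclarationSketch {
    public static void main(String[] args) {
        String decl = "dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn";
        int eq = decl.indexOf('=');
        int hash = decl.indexOf('#');
        String name = decl.substring(0, eq);          // dataIn (optionally prefixed, e.g. coord:dataIn)
        String clazz = decl.substring(eq + 1, hash);  // org.apache.oozie.extensions.OozieELExtensions
        String method = decl.substring(hash + 1);     // ph3_dataIn
        System.out.println(name + " -> " + clazz + "#" + method);
    }
}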

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/pom.xml
----------------------------------------------------------------------
diff --git a/cli/pom.xml b/cli/pom.xml
deleted file mode 100644
index 408c6ee..0000000
--- a/cli/pom.xml
+++ /dev/null
@@ -1,207 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-cli</artifactId>
-    <description>Apache Falcon CLI client</description>
-    <name>Apache Falcon CLI client</name>
-    <packaging>jar</packaging>
-
-    <profiles>
-        <profile>
-            <id>hadoop-2</id>
-            <activation>
-                <activeByDefault>true</activeByDefault>
-            </activation>
-            <dependencies>
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-auth</artifactId>
-                    <scope>compile</scope>
-                </dependency>
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-client</artifactId>
-                </dependency>
-            </dependencies>
-        </profile>
-    </profiles>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.falcon</groupId>
-            <artifactId>falcon-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-net</groupId>
-            <artifactId>commons-net</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-client</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-core</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-json</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-            <version>2.4</version>
-        </dependency>
-
-        <dependency>
-            <groupId>jline</groupId>
-            <artifactId>jline</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.github.stephenc.findbugs</groupId>
-            <artifactId>findbugs-annotations</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.springframework.shell</groupId>
-            <artifactId>spring-shell</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-beans</artifactId>
-            <version>4.0.3.RELEASE</version>
-        </dependency>
-
-    </dependencies>
-
-    <build>
-        <plugins>
-            <!-- make the jar executable by adding a Main-Class and Class-Path to the manifest -->
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-shade-plugin</artifactId>
-                <version>2.4.1</version>
-                <configuration>
-                    <shadedArtifactAttached>false</shadedArtifactAttached>
-                    <createDependencyReducedPom>false</createDependencyReducedPom>
-                    <artifactSet>
-                        <includes>
-                            <include>org.apache.hadoop:hadoop-auth</include>
-                            <include>org.apache.falcon:*</include>
-                            <include>org.apache.commons:*</include>
-                            <include>commons-logging:*</include>
-                            <include>commons-net:*</include>
-                            <include>commons-codec:*</include>
-                            <include>commons-io:*</include>
-                            <include>jline:*</include>
-                            <include>org.slf4j:*</include>
-                            <include>log4j:*</include>
-                            <include>com.sun.jersey:*</include>
-                            <include>org.springframework:*</include>
-                            <include>org.springframework.shell:*</include>
-                        </includes>
-                    </artifactSet>
-                    <filters>
-                        <filter>
-                            <artifact>*:*</artifact>
-                            <excludes>
-                                <exclude>META-INF/*.SF</exclude>
-                                <exclude>META-INF/*.DSA</exclude>
-                                <exclude>META-INF/*.RSA</exclude>
-                            </excludes>
-                        </filter>
-                    </filters>
-                </configuration>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>shade</goal>
-                        </goals>
-                        <configuration>
-                            <transformers>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                                <transformer
-                                        implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer"/>
-                            </transformers>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/FalconCLIRuntimeException.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/FalconCLIRuntimeException.java b/cli/src/main/java/org/apache/falcon/cli/FalconCLIRuntimeException.java
deleted file mode 100644
index b7fa4cd..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/FalconCLIRuntimeException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-/**
- * Runtime exception in CLI. Since most methods are invoked through reflection, checked exceptions
- * end up being thrown as UndeclaredThrowableException. Instead of that, let's throw our own RuntimeException.
- */
-public class FalconCLIRuntimeException extends RuntimeException {
-    public FalconCLIRuntimeException(Throwable e) {
-        super(e);
-    }
-
-    public FalconCLIRuntimeException(String message) {
-        super(message);
-    }
-
-    public FalconCLIRuntimeException(String message, Throwable cause) {
-        super(message, cause);
-    }
-}
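
As a hypothetical illustration of the wrapping pattern the Javadoc above describes (the caller class below is invented for this sketch and is not part of the Falcon code base):

package org.apache.falcon.cli;

import java.lang.reflect.Method;

// Hypothetical caller sketch: rethrows checked reflection failures as the
// runtime exception above instead of an UndeclaredThrowableException.
public class ReflectiveCallSketch {
    static Object invoke(Object target, Method method, Object... args) {
        try {
            return method.invoke(target, args);
        } catch (ReflectiveOperationException e) {
            // surface the underlying cause through FalconCLIRuntimeException
            Throwable cause = e.getCause() != null ? e.getCause() : e;
            throw new FalconCLIRuntimeException(cause);
        }
    }
}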

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java b/cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java
deleted file mode 100644
index dbd28fb..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/commands/BaseFalconCommands.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli.commands;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.cli.FalconCLIRuntimeException;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-import org.springframework.shell.core.ExecutionProcessor;
-import org.springframework.shell.event.ParseResult;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Properties;
-
-/**
- * Common code for all falcon command classes.
- */
-public class BaseFalconCommands implements ExecutionProcessor {
-    private static final String FALCON_URL = "FALCON_URL";
-    private static final String FALCON_URL_PROPERTY = "falcon.url";
-    private static final String DO_AS = "DO_AS";
-    private static final String DO_AS_PROPERTY = "do.as";
-    private static final String CLIENT_PROPERTIES = "/client.properties";
-    private static Properties clientProperties;
-    private static Properties backupProperties = new Properties();
-    private static FalconClient client;
-
-    protected static Properties getClientProperties() {
-        if (clientProperties == null) {
-            InputStream inputStream = null;
-            Properties prop = new Properties(System.getProperties());
-            prop.putAll(backupProperties);
-            try {
-                inputStream = BaseFalconCommands.class.getResourceAsStream(CLIENT_PROPERTIES);
-                if (inputStream != null) {
-                    try {
-                        prop.load(inputStream);
-                    } catch (IOException e) {
-                        throw new FalconCLIRuntimeException(e);
-                    }
-                }
-            } finally {
-                IOUtils.closeQuietly(inputStream);
-            }
-            String urlOverride = System.getenv(FALCON_URL);
-            if (urlOverride != null) {
-                prop.setProperty(FALCON_URL_PROPERTY, urlOverride);
-            }
-            if (prop.getProperty(FALCON_URL_PROPERTY) == null) {
-                throw new FalconCLIRuntimeException("Failed to get falcon url from environment or client properties");
-            }
-            String doAsOverride = System.getenv(DO_AS);
-            if (doAsOverride != null) {
-                prop.setProperty(DO_AS_PROPERTY, doAsOverride);
-            }
-            clientProperties = prop;
-            backupProperties.clear();
-        }
-        return clientProperties;
-    }
-
-    static void setClientProperty(String key, String value) {
-        Properties props;
-        try {
-            props = getClientProperties();
-        } catch (FalconCLIRuntimeException e) {
-            props = backupProperties;
-        }
-        if (StringUtils.isBlank(value)) {
-            props.remove(key);
-        } else {
-            props.setProperty(key, value);
-        }
-        // Re-load client in the next call
-        client = null;
-    }
-
-    public static FalconClient getFalconClient() {
-        if (client == null) {
-            try {
-                client = new FalconClient(getClientProperties().getProperty(FALCON_URL_PROPERTY),
-                        getClientProperties());
-            } catch (FalconCLIException e) {
-                throw new FalconCLIRuntimeException(e.getMessage(), e.getCause());
-            }
-        }
-        return client;
-    }
-
-    @Override
-    public ParseResult beforeInvocation(ParseResult parseResult) {
-        Object[] args = parseResult.getArguments();
-        if (args != null) {
-            boolean allEqual = true;
-            for (int i = 1; i < args.length; i++) {
-                allEqual &= args[0].equals(args[i]);
-            }
-            if (allEqual) {
-                if (args[0] instanceof String) {
-                    String[] split = ((String) args[0]).split("\\s+");
-                    Object[] newArgs = new String[args.length];
-                    System.arraycopy(split, 0, newArgs, 0, split.length);
-                    parseResult = new ParseResult(parseResult.getMethod(), parseResult.getInstance(), newArgs);
-                }
-            }
-        }
-        return parseResult;
-    }
-
-    @Override
-    public void afterReturningInvocation(ParseResult parseResult, Object o) {
-
-    }
-
-    @Override
-    public void afterThrowingInvocation(ParseResult parseResult, Throwable throwable) {
-    }
-}
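
A hypothetical usage sketch of the lookup order implemented above: client.properties on the classpath is read first, and the FALCON_URL / DO_AS environment variables override falcon.url and do.as when set (falcon.example.com below is only an example host):

import org.apache.falcon.cli.commands.BaseFalconCommands;
import org.apache.falcon.client.FalconClient;

// Hypothetical usage sketch, assuming FALCON_URL=http://falcon.example.com:15000
// is exported in the shell; that value wins over falcon.url in client.properties.
public class ClientLookupSketch {
    public static void main(String[] args) {
        FalconClient client = BaseFalconCommands.getFalconClient();
        System.out.println("Falcon client created: " + client);
    }
}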

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java b/cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java
deleted file mode 100644
index cabe5a8..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/commands/FalconConnectionCommands.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.falcon.cli.commands;
-
-
-import org.apache.commons.lang3.StringUtils;
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import javax.annotation.Nonnull;
-
-/**
- * Connection Commands.
- */
-public class FalconConnectionCommands extends BaseFalconCommands {
-
-    @CliCommand(value = "get", help = "get properties")
-    public String getParameter(@CliOption(key = {"", "key"}, mandatory = false, help = "<key>") final String key) {
-        if (StringUtils.isBlank(key)) {
-            return getClientProperties().toString();
-        }
-        return getClientProperties().getProperty(key);
-    }
-
-    @CliCommand(value = "set", help = "set properties")
-    public void setParameter(@CliOption(key = {"", "keyval"}, mandatory = true, help = "<key-val>")
-                             @Nonnull final String keyVal) {
-        String[] kvArray = keyVal.split("=");
-        String key = "";
-        String value = "";
-        if (kvArray.length > 0) {
-            key = kvArray[0];
-        }
-        if (kvArray.length > 1) {
-            value = kvArray[1];
-        }
-        setClientProperty(key, value);
-    }
-}
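
For example, inside the shell one might run "set falcon.url=http://localhost:15000" followed by "get falcon.url" to confirm the stored value (the URL is only illustrative); setParameter splits the argument on '=' and a blank value removes the property via setClientProperty.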

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java b/cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java
deleted file mode 100644
index 6e091ef..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/commands/FalconEntityCommands.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli.commands;
-
-/**
- * Entity Commands.
- */
-public class FalconEntityCommands extends BaseFalconCommands {
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java b/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java
deleted file mode 100644
index 8f3a2fc..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/commands/FalconInstanceCommands.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli.commands;
-
-/**
- * Instance commands.
- */
-public class FalconInstanceCommands extends BaseFalconCommands {
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java b/cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java
deleted file mode 100644
index 03c56c9..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/skel/FalconBanner.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.falcon.cli.skel;
-
-import org.springframework.core.Ordered;
-import org.springframework.core.annotation.Order;
-import org.springframework.shell.plugin.support.DefaultBannerProvider;
-import org.springframework.shell.support.util.OsUtils;
-import org.springframework.stereotype.Component;
-
-/**
- * The Class FalconBanner.
- */
-@Component
-@Order(Ordered.HIGHEST_PRECEDENCE)
-public class FalconBanner extends DefaultBannerProvider {
-
-    @Override
-    public String getBanner() {
-        return new StringBuilder()
-                .append("=======================================").append(OsUtils.LINE_SEPARATOR)
-                .append("*                                     *").append(OsUtils.LINE_SEPARATOR)
-                .append("*            Falcon CLI               *").append(OsUtils.LINE_SEPARATOR)
-                .append("*                                     *").append(OsUtils.LINE_SEPARATOR)
-                .append("=======================================").append(OsUtils.LINE_SEPARATOR)
-                .toString();
-
-    }
-
-    @Override
-    public String getWelcomeMessage() {
-        return "Welcome to Falcon CLI";
-    }
-
-    @Override
-    public String getVersion() {
-        return getClass().getPackage().getImplementationVersion();
-    }
-
-    @Override
-    public String getProviderName() {
-        return "Falcon CLI";
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java b/cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java
deleted file mode 100644
index 74d003a..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/skel/FalconHistoryFileProvider.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli.skel;
-
-import org.springframework.core.Ordered;
-import org.springframework.core.annotation.Order;
-import org.springframework.shell.plugin.support.DefaultHistoryFileNameProvider;
-import org.springframework.stereotype.Component;
-
-/**
- * The Class FalconHistoryFileProvider.
- */
-@Component
-@Order(Ordered.HIGHEST_PRECEDENCE)
-public class FalconHistoryFileProvider extends DefaultHistoryFileNameProvider {
-
-    public String getHistoryFileName() {
-        return "falcon-cli-hist.log";
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.springframework.shell.plugin.support.DefaultHistoryFileNameProvider#name()
-     */
-    @Override
-    public String getProviderName() {
-        return "falcon client history provider";
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java
----------------------------------------------------------------------
diff --git a/cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java b/cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java
deleted file mode 100644
index d8ead5b..0000000
--- a/cli/src/main/java/org/apache/falcon/cli/skel/FalconPromptProvider.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli.skel;
-
-import org.springframework.core.Ordered;
-import org.springframework.core.annotation.Order;
-import org.springframework.shell.plugin.support.DefaultPromptProvider;
-import org.springframework.stereotype.Component;
-
-/**
- * The Class FalconPromptProvider.
- */
-@Component
-@Order(Ordered.HIGHEST_PRECEDENCE)
-public class FalconPromptProvider extends DefaultPromptProvider {
-
-    @Override
-    public String getPrompt() {
-        return "falcon-shell>";
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see org.springframework.shell.plugin.support.DefaultPromptProvider#name()
-     */
-    @Override
-    public String getProviderName() {
-        return "falcon prompt provider";
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml
----------------------------------------------------------------------
diff --git a/cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml b/cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml
deleted file mode 100644
index bd0fed4..0000000
--- a/cli/src/main/resources/META-INF/spring/spring-shell-plugin.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
-
--->
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:context="http://www.springframework.org/schema/context"
-       xsi:schemaLocation="
-  http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
-  http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.1.xsd">
-
-    <context:component-scan base-package="org.apache.falcon.cli"/>
-
-    <bean id="falconConnectionCommands"
-          class="org.apache.falcon.cli.commands.FalconConnectionCommands">
-    </bean>
-    <bean id="falconEntityCommands"
-          class="org.apache.falcon.cli.commands.FalconEntityCommands">
-    </bean>
-    <bean id="falconInstanceCommands"
-          class="org.apache.falcon.cli.commands.FalconInstanceCommands">
-    </bean>
-</beans>
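
For context, a minimal launcher sketch, assuming the standard spring-shell 1.x Bootstrap entry point (an assumption; the actual launcher is not part of this diff). Bootstrap reads META-INF/spring/spring-shell-plugin.xml from the classpath and instantiates the command beans declared above:

import org.springframework.shell.Bootstrap;

// Sketch only; assumes the spring-shell 1.x Bootstrap entry point.
public class FalconShellLauncher {
    public static void main(String[] args) throws Exception {
        // Picks up META-INF/spring/spring-shell-plugin.xml and starts the
        // interactive falcon-shell> prompt with the beans declared above.
        Bootstrap.main(args);
    }
}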

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/pom.xml
----------------------------------------------------------------------
diff --git a/client/pom.xml b/client/pom.xml
deleted file mode 100644
index 743b93d..0000000
--- a/client/pom.xml
+++ /dev/null
@@ -1,161 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-    
-       http://www.apache.org/licenses/LICENSE-2.0
-        
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-client</artifactId>
-    <description>Apache Falcon Java client</description>
-    <name>Apache Falcon Java client</name>
-    <packaging>jar</packaging>
-
-    <profiles>
-        <profile>
-            <id>hadoop-2</id>
-            <activation>
-                <activeByDefault>true</activeByDefault>
-            </activation>
-            <dependencies>
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-auth</artifactId>
-                </dependency>
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-client</artifactId>
-                </dependency>
-            </dependencies>
-        </profile>
-    </profiles>
-
-    <dependencies>
-        <dependency>
-            <groupId>commons-cli</groupId>
-            <artifactId>commons-cli</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-net</groupId>
-            <artifactId>commons-net</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-json</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.github.stephenc.findbugs</groupId>
-            <artifactId>findbugs-annotations</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hive.hcatalog</groupId>
-            <artifactId>hive-webhcat-java-client</artifactId>
-            <version>${hive.version}</version>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <artifactId>maven-dependency-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>copy-dependencies</goal>
-                        </goals>
-                        <configuration>
-                            <outputDirectory>${project.build.directory}/dependency</outputDirectory>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <groupId>org.jvnet.jaxb2.maven2</groupId>
-                <artifactId>maven-jaxb2-plugin</artifactId>
-                <version>0.8.0</version>
-                <executions>
-                    <execution>
-                        <id>1</id>
-                        <phase>generate-sources</phase>
-                        <goals>
-                            <goal>generate</goal>
-                        </goals>
-                    </execution>
-                </executions>
-                <configuration>
-                    <args>
-                        <arg>-extension</arg>
-                        <arg>-Xinheritance</arg>
-                    </args>
-                    <plugins>
-                        <plugin>
-                            <groupId>org.jvnet.jaxb2_commons</groupId>
-                            <artifactId>jaxb2-basics</artifactId>
-                            <version>0.6.3</version>
-                        </plugin>
-                    </plugins>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/LifeCycle.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/LifeCycle.java b/client/src/main/java/org/apache/falcon/LifeCycle.java
deleted file mode 100644
index 0ecddd1..0000000
--- a/client/src/main/java/org/apache/falcon/LifeCycle.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon;
-
-/**
- * Represents life cycle of an entity.
- */
-
-public enum LifeCycle {
-    EXECUTION(Tag.DEFAULT),
-    EVICTION(Tag.RETENTION),
-    REPLICATION(Tag.REPLICATION),
-    IMPORT(Tag.IMPORT),
-    EXPORT(Tag.EXPORT);
-
-    private final Tag tag;
-
-    LifeCycle(Tag tag) {
-        this.tag = tag;
-    }
-
-    public Tag getTag() {
-        return this.tag;
-    }
-}
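
A minimal usage sketch for the LifeCycle enum removed above; it assumes the companion Tag enum in the same org.apache.falcon package, which is not part of this hunk:

    import org.apache.falcon.LifeCycle;
    import org.apache.falcon.Tag;

    public final class LifeCycleExample {
        public static void main(String[] args) {
            // Each lifecycle resolves to the tag constant it was constructed with.
            LifeCycle lifeCycle = LifeCycle.valueOf("REPLICATION");
            Tag tag = lifeCycle.getTag();          // Tag.REPLICATION
            System.out.println(lifeCycle + " -> " + tag);
        }
    }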

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/Pair.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/Pair.java b/client/src/main/java/org/apache/falcon/Pair.java
deleted file mode 100644
index d4cea90..0000000
--- a/client/src/main/java/org/apache/falcon/Pair.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon;
-
-import java.io.Serializable;
-
-/**
- * Simple pair class to hold a pair of objects of specific classes.
- * @param <A> - First element in the pair.
- * @param <B> - Second element in the pair.
- */
-public class Pair<A, B> implements Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    //SUSPEND CHECKSTYLE CHECK VisibilityModifierCheck
-    public final A first;
-    public final B second;
-    //RESUME CHECKSTYLE CHECK VisibilityModifierCheck
-
-    public Pair(A fst, B snd) {
-        this.first = fst;
-        this.second = snd;
-    }
-
-    public static <A, B> Pair<A, B> of(A a, B b) {
-        return new Pair<A, B>(a, b);
-    }
-
-    @Override
-    public String toString() {
-        return "(" + first + "," + second + ")";
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
-
-        Pair pair = (Pair) o;
-
-        if (first != null ? !first.equals(pair.first) : pair.first != null) {
-            return false;
-        }
-        if (second != null ? !second.equals(pair.second) : pair.second != null) {
-            return false;
-        }
-
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = first != null ? first.hashCode() : 0;
-        result = 31 * result + (second != null ? second.hashCode() : 0);
-        return result;
-    }
-}
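
A minimal usage sketch for the Pair class removed above; the wrapper class and the values are illustrative:

    import org.apache.falcon.Pair;

    public final class PairExample {
        public static void main(String[] args) {
            Pair<String, Integer> retry = Pair.of("retries", 3);
            System.out.println(retry.first);                          // retries
            System.out.println(retry.second);                         // 3
            System.out.println(retry);                                // (retries,3) per toString()
            System.out.println(retry.equals(Pair.of("retries", 3)));  // true, equals() is null-safe
        }
    }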

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/ResponseHelper.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/ResponseHelper.java b/client/src/main/java/org/apache/falcon/ResponseHelper.java
deleted file mode 100644
index 8f22af7..0000000
--- a/client/src/main/java/org/apache/falcon/ResponseHelper.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.resource.EntitySummaryResult;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.falcon.resource.FeedLookupResult;
-import org.apache.falcon.resource.InstanceDependencyResult;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-import org.apache.falcon.resource.SchedulableEntityInstanceResult;
-import org.apache.falcon.resource.TriageResult;
-
-import java.util.Date;
-import java.util.Formatter;
-import java.util.Map;
-
-/**
- * Helpers for response object to string conversion.
- */
-
-public final class ResponseHelper {
-
-    private ResponseHelper() { }
-
-    public static String getString(EntitySummaryResult result) {
-        StringBuilder sb = new StringBuilder();
-        String toAppend;
-        sb.append("Consolidated Status: ").append(result.getStatus())
-            .append("\n");
-        sb.append("\nEntity Summary Result :\n");
-        if (result.getEntitySummaries() != null) {
-            for (EntitySummaryResult.EntitySummary entitySummary : result.getEntitySummaries()) {
-                toAppend = entitySummary.toString();
-                sb.append(toAppend).append("\n");
-            }
-        }
-        sb.append("\nAdditional Information:\n");
-        sb.append("Response: ").append(result.getMessage());
-        sb.append("Request Id: ").append(result.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(InstancesResult result, String runid) {
-        StringBuilder sb = new StringBuilder();
-        String toAppend;
-
-        sb.append("Consolidated Status: ").append(result.getStatus())
-            .append("\n");
-
-        sb.append("\nInstances:\n");
-        sb.append("Instance\t\tCluster\t\tSourceCluster\t\tStatus\t\tRunID\t\t\tLog\n");
-        sb.append("-----------------------------------------------------------------------------------------------\n");
-        if (result.getInstances() != null) {
-            for (InstancesResult.Instance instance : result.getInstances()) {
-
-                toAppend =
-                    (instance.getInstance() != null) ? instance.getInstance()
-                        : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    instance.getCluster() != null ? instance.getCluster() : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    instance.getSourceCluster() != null ? instance
-                        .getSourceCluster() : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    (instance.getStatus() != null ? instance.getStatus()
-                        .toString() : "-");
-                sb.append(toAppend).append("\t");
-
-                toAppend = (runid != null ? runid : "latest");
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    instance.getLogFile() != null ? instance.getLogFile() : "-";
-                sb.append(toAppend).append("\n");
-
-                if (instance.actions != null) {
-                    sb.append("actions:\n");
-                    for (InstancesResult.InstanceAction action : instance.actions) {
-                        sb.append("    ").append(action.getAction())
-                            .append("\t");
-                        sb.append(action.getStatus()).append("\t")
-                            .append(action.getLogFile()).append("\n");
-                    }
-                }
-            }
-        }
-        sb.append("\nAdditional Information:\n");
-        sb.append("Response: ").append(result.getMessage());
-        sb.append("Request Id: ").append(result.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(FeedInstanceResult result) {
-        StringBuilder sb = new StringBuilder();
-        String toAppend;
-
-        sb.append("Consolidated Status: ").append(result.getStatus())
-            .append("\n");
-
-        sb.append("\nInstances:\n");
-        Formatter formatter = new Formatter(sb);
-        formatter.format("%-16s%-20s%-16s%-16s%-20s%-16s", "Cluster", "Instance", "Status", "Size", "CreationTime",
-                "Details");
-        sb.append("\n");
-        sb.append("-----------------------------------------------------------------------------------------------\n");
-        if (result.getInstances() != null) {
-            for (FeedInstanceResult.Instance instance : result.getInstances()) {
-                formatter.format("%-16s", instance.getCluster() != null ? instance.getCluster() : "-");
-                formatter.format("%-20s", instance.getInstance() != null ? instance.getInstance() : "-");
-                formatter.format("%-16s", instance.getStatus() != null ? instance.getStatus() : "-");
-                formatter.format("%-16s", instance.getSize() != -1 ? instance.getSizeH() : "-");
-                formatter.format("%-20s", instance.getCreationTime() != 0
-                        ? SchemaHelper.formatDateUTC(new Date(instance.getCreationTime())) : "-");
-                formatter.format("%-16s", StringUtils.isEmpty(instance.getUri()) ? "-" : instance.getUri());
-                sb.append("\n");
-            }
-        }
-        sb.append("\nAdditional Information:\n");
-        sb.append("Response: ").append(result.getMessage());
-        sb.append("Request Id: ").append(result.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(InstancesResult result) {
-        StringBuilder sb = new StringBuilder();
-        String toAppend;
-
-        sb.append("Consolidated Status: ").append(result.getStatus())
-            .append("\n");
-
-        sb.append("\nInstances:\n");
-        sb.append("Instance\t\tCluster\t\tSourceCluster\t\tStatus\t\tStart\t\tEnd\t\tDetails\t\t\t\t\tLog\n");
-        sb.append("-----------------------------------------------------------------------------------------------\n");
-        if (result.getInstances() != null) {
-            for (InstancesResult.Instance instance : result.getInstances()) {
-
-                toAppend =
-                    instance.getInstance() != null ? instance.getInstance()
-                        : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    instance.getCluster() != null ? instance.getCluster() : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    instance.getSourceCluster() != null ? instance
-                        .getSourceCluster() : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    (instance.getStatus() != null ? instance.getStatus()
-                        .toString() : "-");
-                sb.append(toAppend).append("\t");
-
-                toAppend = instance.getStartTime() != null
-                    ? SchemaHelper.formatDateUTC(instance.getStartTime()) : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend = instance.getEndTime() != null
-                    ? SchemaHelper.formatDateUTC(instance.getEndTime()) : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend = (!StringUtils.isEmpty(instance.getDetails()))
-                    ? instance.getDetails() : "-";
-                sb.append(toAppend).append("\t");
-
-                toAppend =
-                    instance.getLogFile() != null ? instance.getLogFile() : "-";
-                sb.append(toAppend).append("\n");
-
-                if (instance.getWfParams() != null) {
-                    InstancesResult.KeyValuePair[] props = instance.getWfParams();
-                    sb.append("Workflow params").append("\n");
-                    for (InstancesResult.KeyValuePair entry : props) {
-                        sb.append(entry.getKey()).append("=")
-                            .append(entry.getValue()).append("\n");
-                    }
-                    sb.append("\n");
-                }
-
-                if (instance.actions != null) {
-                    sb.append("actions:\n");
-                    for (InstancesResult.InstanceAction action : instance.actions) {
-                        sb.append(" ").append(action.getAction()).append("\t");
-                        sb.append(action.getStatus()).append("\t")
-                            .append(action.getLogFile()).append("\n");
-                    }
-                }
-            }
-        }
-        sb.append("\nAdditional Information:\n");
-        sb.append("Response: ").append(result.getMessage());
-        sb.append("Request Id: ").append(result.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(InstancesSummaryResult result) {
-        StringBuilder sb = new StringBuilder();
-        String toAppend;
-
-        sb.append("Consolidated Status: ").append(result.getStatus())
-            .append("\n");
-        sb.append("\nInstances Summary:\n");
-
-        if (result.getInstancesSummary() != null) {
-            for (InstancesSummaryResult.InstanceSummary summary : result
-                .getInstancesSummary()) {
-                toAppend =
-                    summary.getCluster() != null ? summary.getCluster() : "-";
-                sb.append("Cluster: ").append(toAppend).append("\n");
-
-                sb.append("Status\t\tCount\n");
-                sb.append("-------------------------\n");
-
-                for (Map.Entry<String, Long> entry : summary.getSummaryMap()
-                    .entrySet()) {
-                    sb.append(entry.getKey()).append("\t\t")
-                        .append(entry.getValue()).append("\n");
-                }
-            }
-        }
-
-        sb.append("\nAdditional Information:\n");
-        sb.append("Response: ").append(result.getMessage());
-        sb.append("Request Id: ").append(result.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(TriageResult triageResult) {
-        StringBuilder sb = new StringBuilder();
-
-        sb.append(triageResult.toString());
-        sb.append("\nAdditional Information:\n");
-        sb.append("Response: ").append(triageResult.getMessage());
-        sb.append("Request Id: ").append(triageResult.getRequestId());
-
-        return sb.toString();
-    }
-
-    public static String getString(FeedLookupResult feedLookupResult) {
-        StringBuilder sb = new StringBuilder();
-        String results = feedLookupResult.toString();
-        if (StringUtils.isEmpty(results)) {
-            sb.append("No matching feeds found!");
-        } else {
-            sb.append(results);
-        }
-        sb.append("\n\nResponse: ").append(feedLookupResult.getMessage());
-        sb.append("\nRequest Id: ").append(feedLookupResult.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(InstanceDependencyResult dependencyResult) {
-        StringBuilder sb = new StringBuilder();
-        String results = dependencyResult.toString();
-        if (StringUtils.isEmpty(results)) {
-            sb.append("No dependencies found!");
-        } else {
-            sb.append(results);
-        }
-        sb.append("\n\nResponse: ").append(dependencyResult.getMessage());
-        sb.append("\nRequest Id: ").append(dependencyResult.getRequestId());
-        return sb.toString();
-    }
-
-    public static String getString(SchedulableEntityInstanceResult instances) {
-        StringBuilder sb = new StringBuilder();
-        String results = instances.toString();
-        if (StringUtils.isEmpty(results)) {
-            sb.append("No sla miss found!");
-        } else {
-            sb.append(results);
-        }
-        sb.append("\n\nResponse: ").append(instances.getMessage());
-        sb.append("\nRequest Id: ").append(instances.getRequestId());
-        return sb.toString();
-    }
-}
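
A minimal sketch of how the helper removed above is typically used to render a result on the console; obtaining the InstancesResult (normally via the Falcon REST client) is out of scope for the sketch:

    import org.apache.falcon.ResponseHelper;
    import org.apache.falcon.resource.InstancesResult;

    public final class ResponseHelperExample {
        // Prints the consolidated status, one row per instance, then the message and request id.
        static void print(InstancesResult result) {
            System.out.println(ResponseHelper.getString(result));
        }
    }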


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/InstanceRelationshipGraphBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/InstanceRelationshipGraphBuilder.java b/common/src/main/java/org/apache/falcon/metadata/InstanceRelationshipGraphBuilder.java
deleted file mode 100644
index b709857..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/InstanceRelationshipGraphBuilder.java
+++ /dev/null
@@ -1,381 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.Vertex;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.CatalogStorage;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.workflow.WorkflowExecutionArgs;
-import org.apache.falcon.workflow.WorkflowExecutionContext;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.URISyntaxException;
-import java.util.Date;
-import java.util.TimeZone;
-
-/**
- * Instance Metadata relationship mapping helper.
- */
-public class InstanceRelationshipGraphBuilder extends RelationshipGraphBuilder {
-
-    private static final Logger LOG = LoggerFactory.getLogger(InstanceRelationshipGraphBuilder.class);
-
-    private static final String FEED_INSTANCE_FORMAT = "yyyyMMddHHmm"; // computed
-    private static final String NONE = "NONE";
-    private static final String IGNORE = "IGNORE";
-
-    // process workflow properties from message
-    private static final WorkflowExecutionArgs[] INSTANCE_WORKFLOW_PROPERTIES = {
-        WorkflowExecutionArgs.USER_WORKFLOW_NAME,
-        WorkflowExecutionArgs.USER_WORKFLOW_ENGINE,
-        WorkflowExecutionArgs.WORKFLOW_ID,
-        WorkflowExecutionArgs.RUN_ID,
-        WorkflowExecutionArgs.STATUS,
-        WorkflowExecutionArgs.WF_ENGINE_URL,
-        WorkflowExecutionArgs.USER_SUBFLOW_ID,
-    };
-
-
-    public InstanceRelationshipGraphBuilder(Graph graph, boolean preserveHistory) {
-        super(graph, preserveHistory);
-    }
-
-    public Vertex addProcessInstance(WorkflowExecutionContext context) throws FalconException {
-        String processInstanceName = getProcessInstanceName(context);
-        LOG.info("Adding process instance: {}", processInstanceName);
-
-        Vertex processInstance = addVertex(processInstanceName,
-                RelationshipType.PROCESS_INSTANCE, context.getTimeStampAsLong());
-        addWorkflowInstanceProperties(processInstance, context);
-
-        addInstanceToEntity(processInstance, context.getEntityName(),
-                RelationshipType.PROCESS_ENTITY, RelationshipLabel.INSTANCE_ENTITY_EDGE);
-        addInstanceToEntity(processInstance, context.getClusterName(),
-                RelationshipType.CLUSTER_ENTITY, RelationshipLabel.PROCESS_CLUSTER_EDGE);
-        addInstanceToEntity(processInstance, context.getWorkflowUser(),
-                RelationshipType.USER, RelationshipLabel.USER);
-
-        if (isPreserveHistory()) {
-            Process process = ConfigurationStore.get().get(EntityType.PROCESS, context.getEntityName());
-            addDataClassification(process.getTags(), processInstance);
-            addPipelines(process.getPipelines(), processInstance);
-        }
-
-        addCounters(processInstance, context);
-
-        return processInstance;
-    }
-
-    private void addCounters(Vertex processInstance, WorkflowExecutionContext context) throws FalconException {
-        String counterString = getCounterString(context);
-        if (!StringUtils.isBlank(counterString)) {
-            addCountersToInstance(counterString, processInstance);
-        }
-    }
-
-    private String getCounterString(WorkflowExecutionContext context) {
-        if (!StringUtils.isBlank(context.getCounters())) {
-            return context.getCounters();
-        }
-        return null;
-    }
-
-    public String getProcessInstanceName(WorkflowExecutionContext context) {
-        return context.getEntityName() + "/" + context.getNominalTimeAsISO8601();
-    }
-
-    public void addWorkflowInstanceProperties(Vertex processInstance,
-                                              WorkflowExecutionContext context) {
-        for (WorkflowExecutionArgs instanceWorkflowProperty : INSTANCE_WORKFLOW_PROPERTIES) {
-            addProperty(processInstance, context, instanceWorkflowProperty);
-        }
-
-        processInstance.setProperty(RelationshipProperty.VERSION.getName(),
-                context.getUserWorkflowVersion());
-    }
-
-    private void addProperty(Vertex vertex, WorkflowExecutionContext context,
-                             WorkflowExecutionArgs optionName) {
-        String value = context.getValue(optionName);
-        if (value == null || value.length() == 0) {
-            return;
-        }
-
-        vertex.setProperty(optionName.getName(), value);
-    }
-
-    private void addCountersToInstance(String counterString, Vertex vertex) throws FalconException {
-        String[] counterKeyValues = counterString.split(",");
-        try {
-            for (String counter : counterKeyValues) {
-                String[] keyVals = counter.split(":", 2);
-                vertex.setProperty(keyVals[0], Long.parseLong(keyVals[1]));
-            }
-        } catch (NumberFormatException e) {
-            throw new FalconException("Invalid values for counter:" + e);
-        }
-    }
-
-    public void addInstanceToEntity(Vertex instanceVertex, String entityName,
-                                    RelationshipType entityType, RelationshipLabel edgeLabel) {
-        addInstanceToEntity(instanceVertex, entityName, entityType, edgeLabel, null);
-    }
-
-    public void addInstanceToEntity(Vertex instanceVertex, String entityName,
-                                    RelationshipType entityType, RelationshipLabel edgeLabel,
-                                    String timestamp) {
-        Vertex entityVertex = findVertex(entityName, entityType);
-        LOG.info("Vertex exists? name={}, type={}, v={}", entityName, entityType, entityVertex);
-        if (entityVertex == null) {
-            LOG.error("Illegal State: {} vertex must exist for {}", entityType, entityName);
-            throw new IllegalStateException(entityType + " entity vertex must exist " + entityName);
-        }
-
-        addEdge(instanceVertex, entityVertex, edgeLabel.getName(), timestamp);
-    }
-
-    public void addOutputFeedInstances(WorkflowExecutionContext context,
-                                       Vertex processInstance) throws FalconException {
-        String outputFeedNamesArg = context.getOutputFeedNames();
-        if (NONE.equals(outputFeedNamesArg) || IGNORE.equals(outputFeedNamesArg)) {
-            return; // there are no output feeds for this process
-        }
-
-        String[] outputFeedNames = context.getOutputFeedNamesList();
-        String[] outputFeedInstancePaths = context.getOutputFeedInstancePathsList();
-
-        for (int index = 0; index < outputFeedNames.length; index++) {
-            String feedName = outputFeedNames[index];
-            String feedInstanceDataPath = outputFeedInstancePaths[index];
-            addFeedInstance(processInstance, RelationshipLabel.PROCESS_FEED_EDGE,
-                    context, feedName, feedInstanceDataPath);
-        }
-    }
-
-    public void addInputFeedInstances(WorkflowExecutionContext context,
-                                      Vertex processInstance) throws FalconException {
-        String inputFeedNamesArg = context.getInputFeedNames();
-        if (NONE.equals(inputFeedNamesArg) || IGNORE.equals(inputFeedNamesArg)) {
-            return; // there are no input feeds for this process
-        }
-
-        String[] inputFeedNames = context.getInputFeedNamesList();
-        String[] inputFeedInstancePaths = context.getInputFeedInstancePathsList();
-
-        for (int index = 0; index < inputFeedNames.length; index++) {
-            String inputFeedName = inputFeedNames[index];
-            String inputFeedInstancePath = inputFeedInstancePaths[index];
-            // Multiple instance paths for a given feed is separated by ","
-            String[] feedInstancePaths = inputFeedInstancePath.split(",");
-
-            for (String feedInstanceDataPath : feedInstancePaths) {
-                addFeedInstance(processInstance, RelationshipLabel.FEED_PROCESS_EDGE,
-                        context, inputFeedName, feedInstanceDataPath);
-            }
-        }
-    }
-
-    public void addReplicatedInstance(WorkflowExecutionContext context) throws FalconException {
-        // For replication there will be only one output feed name and path
-        String feedName = context.getOutputFeedNames();
-        String feedInstanceDataPath = context.getOutputFeedInstancePaths();
-        String targetClusterName = context.getClusterName();
-
-        LOG.info("Computing feed instance for : name= {} path= {}, in cluster: {}", feedName,
-                feedInstanceDataPath, targetClusterName);
-        String feedInstanceName = getFeedInstanceName(feedName, targetClusterName,
-                feedInstanceDataPath, context.getNominalTimeAsISO8601());
-        Vertex feedInstanceVertex = findVertex(feedInstanceName, RelationshipType.FEED_INSTANCE);
-
-        LOG.info("Vertex exists? name={}, type={}, v={}",
-                feedInstanceName, RelationshipType.FEED_INSTANCE, feedInstanceVertex);
-        if (feedInstanceVertex == null) { // No record of instances NOT generated by Falcon
-            LOG.info("{} instance vertex {} does not exist, add it",
-                    RelationshipType.FEED_INSTANCE, feedInstanceName);
-            feedInstanceVertex = addFeedInstance(// add a new instance
-                    feedInstanceName, context, feedName, context.getSrcClusterName());
-        }
-
-        addInstanceToEntity(feedInstanceVertex, targetClusterName, RelationshipType.CLUSTER_ENTITY,
-                RelationshipLabel.FEED_CLUSTER_REPLICATED_EDGE, context.getTimeStampAsISO8601());
-
-        addCounters(feedInstanceVertex, context);
-    }
-
-    public void addEvictedInstance(WorkflowExecutionContext context) throws FalconException {
-        final String outputFeedPaths = context.getOutputFeedInstancePaths();
-        if (IGNORE.equals(outputFeedPaths)) {
-            LOG.info("There were no evicted instances, nothing to record");
-            return;
-        }
-
-        LOG.info("Recording lineage for evicted instances {}", outputFeedPaths);
-        // For retention there will be only one output feed name
-        String feedName = context.getOutputFeedNames();
-        String[] evictedFeedInstancePathList = context.getOutputFeedInstancePathsList();
-        String clusterName = context.getClusterName();
-
-        for (String evictedFeedInstancePath : evictedFeedInstancePathList) {
-            LOG.info("Computing feed instance for : name= {}, path={}, in cluster: {}",
-                    feedName, evictedFeedInstancePath, clusterName);
-            String feedInstanceName = getFeedInstanceName(feedName, clusterName,
-                    evictedFeedInstancePath, context.getNominalTimeAsISO8601());
-            Vertex feedInstanceVertex = findVertex(feedInstanceName,
-                    RelationshipType.FEED_INSTANCE);
-
-            LOG.info("Vertex exists? name={}, type={}, v={}",
-                    feedInstanceName, RelationshipType.FEED_INSTANCE, feedInstanceVertex);
-            if (feedInstanceVertex == null) { // No record of instances NOT generated by Falcon
-                LOG.info("{} instance vertex {} does not exist, add it",
-                        RelationshipType.FEED_INSTANCE, feedInstanceName);
-                feedInstanceVertex = addFeedInstance(// add a new instance
-                        feedInstanceName, context, feedName, clusterName);
-            }
-
-            addInstanceToEntity(feedInstanceVertex, clusterName, RelationshipType.CLUSTER_ENTITY,
-                    RelationshipLabel.FEED_CLUSTER_EVICTED_EDGE, context.getTimeStampAsISO8601());
-        }
-    }
-
-
-    public void addImportedInstance(WorkflowExecutionContext context) throws FalconException {
-
-        String feedName = context.getOutputFeedNames();
-        String feedInstanceDataPath = context.getOutputFeedInstancePaths();
-        String datasourceName = context.getDatasourceName();
-        String sourceClusterName = context.getSrcClusterName();
-
-        LOG.info("Computing import feed instance for : name= {} path= {}, in cluster: {} "
-                       +  "from datasource: {}", feedName,
-                feedInstanceDataPath, sourceClusterName, datasourceName);
-        String feedInstanceName = getFeedInstanceName(feedName, sourceClusterName,
-                feedInstanceDataPath, context.getNominalTimeAsISO8601());
-        Vertex feedInstanceVertex = findVertex(feedInstanceName, RelationshipType.FEED_INSTANCE);
-
-        LOG.info("Vertex exists? name={}, type={}, v={}",
-                feedInstanceName, RelationshipType.FEED_INSTANCE, feedInstanceVertex);
-        if (feedInstanceVertex == null) { // No record of instances NOT generated by Falcon
-            LOG.info("{} instance vertex {} does not exist, add it",
-                    RelationshipType.FEED_INSTANCE, feedInstanceName);
-            feedInstanceVertex = addFeedInstance(// add a new instance
-                    feedInstanceName, context, feedName, context.getSrcClusterName());
-        }
-        addInstanceToEntity(feedInstanceVertex, datasourceName, RelationshipType.DATASOURCE_ENTITY,
-                RelationshipLabel.DATASOURCE_IMPORT_EDGE, context.getTimeStampAsISO8601());
-        addInstanceToEntity(feedInstanceVertex, sourceClusterName, RelationshipType.CLUSTER_ENTITY,
-                RelationshipLabel.FEED_CLUSTER_EDGE, context.getTimeStampAsISO8601());
-    }
-
-    public String getImportInstanceName(WorkflowExecutionContext context) {
-        return context.getEntityName() + "/" + context.getNominalTimeAsISO8601();
-    }
-
-    private void addFeedInstance(Vertex processInstance, RelationshipLabel edgeLabel,
-                                 WorkflowExecutionContext context, String feedName,
-                                 String feedInstanceDataPath) throws FalconException {
-        String clusterName = context.getClusterName();
-        LOG.info("Computing feed instance for : name= {} path= {}, in cluster: {}", feedName,
-                feedInstanceDataPath, clusterName);
-        String feedInstanceName = getFeedInstanceName(feedName, clusterName,
-                feedInstanceDataPath, context.getNominalTimeAsISO8601());
-        Vertex feedInstance = addFeedInstance(feedInstanceName, context, feedName, clusterName);
-        addProcessFeedEdge(processInstance, feedInstance, edgeLabel);
-    }
-
-    private Vertex addFeedInstance(String feedInstanceName, WorkflowExecutionContext context,
-                                   String feedName, String clusterName) throws FalconException {
-        LOG.info("Adding feed instance {}", feedInstanceName);
-        Vertex feedInstance = addVertex(feedInstanceName, RelationshipType.FEED_INSTANCE,
-                context.getTimeStampAsLong());
-
-        addInstanceToEntity(feedInstance, feedName,
-                RelationshipType.FEED_ENTITY, RelationshipLabel.INSTANCE_ENTITY_EDGE);
-        addInstanceToEntity(feedInstance, clusterName,
-                RelationshipType.CLUSTER_ENTITY, RelationshipLabel.FEED_CLUSTER_EDGE);
-        addInstanceToEntity(feedInstance, context.getWorkflowUser(),
-                RelationshipType.USER, RelationshipLabel.USER);
-
-        if (isPreserveHistory()) {
-            Feed feed = ConfigurationStore.get().get(EntityType.FEED, feedName);
-            addDataClassification(feed.getTags(), feedInstance);
-            addGroups(feed.getGroups(), feedInstance);
-        }
-
-        return feedInstance;
-    }
-
-    public static String getFeedInstanceName(String feedName, String clusterName,
-                                             String feedInstancePath,
-                                             String nominalTime) throws FalconException {
-        try {
-            Feed feed = ConfigurationStore.get().get(EntityType.FEED, feedName);
-            Cluster cluster = ConfigurationStore.get().get(EntityType.CLUSTER, clusterName);
-
-            Storage.TYPE storageType = FeedHelper.getStorageType(feed, cluster);
-            return storageType == Storage.TYPE.TABLE
-                    ? getTableFeedInstanceName(feed, feedInstancePath, storageType)
-                    : getFileSystemFeedInstanceName(feedInstancePath, feed, cluster, nominalTime);
-
-        } catch (URISyntaxException e) {
-            throw new FalconException(e);
-        }
-    }
-
-    private static String getTableFeedInstanceName(Feed feed, String feedInstancePath,
-                                            Storage.TYPE storageType) throws URISyntaxException {
-        CatalogStorage instanceStorage = (CatalogStorage) FeedHelper.createStorage(
-                storageType.name(), feedInstancePath);
-        return feed.getName() + "/" + instanceStorage.toPartitionAsPath();
-    }
-
-    private static String getFileSystemFeedInstanceName(String feedInstancePath, Feed feed,
-                                                        Cluster cluster,
-                                                        String nominalTime) throws FalconException {
-        Storage rawStorage = FeedHelper.createStorage(cluster, feed);
-        String feedPathTemplate = rawStorage.getUriTemplate(LocationType.DATA);
-        String instance = feedInstancePath;
-
-        String[] elements = FeedDataPath.PATTERN.split(feedPathTemplate);
-        for (String element : elements) {
-            instance = instance.replaceFirst(element, "");
-        }
-
-        Date instanceTime = FeedHelper.getDate(feedPathTemplate,
-                new Path(feedInstancePath), TimeZone.getTimeZone("UTC"));
-
-        return StringUtils.isEmpty(instance)
-                ? feed.getName() + "/" + nominalTime
-                : feed.getName() + "/"
-                        + SchemaHelper.formatDateUTC(instanceTime);
-    }
-}
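
The addCountersToInstance(...) helper above expects a comma-separated list of key:value counters with numeric values. A minimal sketch of that format; the counter names below are illustrative:

    public final class CounterFormatExample {
        public static void main(String[] args) {
            String counters = "TIMETAKEN:36956,BYTESCOPIED:1000";
            for (String counter : counters.split(",")) {
                // Same parsing as addCountersToInstance: split once on ':' and store a Long value.
                String[] keyVals = counter.split(":", 2);
                System.out.println(keyVals[0] + " = " + Long.parseLong(keyVals[1]));
            }
        }
    }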

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java b/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
deleted file mode 100644
index cf2b651..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
+++ /dev/null
@@ -1,338 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-import com.thinkaurelius.titan.graphdb.blueprints.TitanBlueprintsGraph;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.GraphFactory;
-import com.tinkerpop.blueprints.KeyIndexableGraph;
-import com.tinkerpop.blueprints.TransactionalGraph;
-import com.tinkerpop.blueprints.Vertex;
-import com.tinkerpop.blueprints.util.TransactionRetryHelper;
-import com.tinkerpop.blueprints.util.TransactionWork;
-import org.apache.commons.configuration.BaseConfiguration;
-import org.apache.commons.configuration.Configuration;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.service.ConfigurationChangeListener;
-import org.apache.falcon.service.FalconService;
-import org.apache.falcon.service.Services;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.falcon.workflow.WorkflowJobEndNotificationService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.falcon.workflow.WorkflowExecutionContext;
-import org.apache.falcon.workflow.WorkflowExecutionListener;
-
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-/**
- * Metadata relationship mapping service. Maps relationships into a graph database.
- */
-public class MetadataMappingService
-        implements FalconService, ConfigurationChangeListener, WorkflowExecutionListener {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MetadataMappingService.class);
-
-    /**
-     * Constant for the service name.
-     */
-    public static final String SERVICE_NAME = MetadataMappingService.class.getSimpleName();
-
-    /**
-     * Constant for the configuration property that indicates the prefix.
-     */
-    private static final String FALCON_PREFIX = "falcon.graph.";
-
-
-    private Graph graph;
-    private Set<String> vertexIndexedKeys;
-    private Set<String> edgeIndexedKeys;
-    private EntityRelationshipGraphBuilder entityGraphBuilder;
-    private InstanceRelationshipGraphBuilder instanceGraphBuilder;
-
-    private int transactionRetries;
-    private long transactionRetryDelayInMillis;
-
-    @Override
-    public String getName() {
-        return SERVICE_NAME;
-    }
-
-    @Override
-    public void init() throws FalconException {
-        graph = initializeGraphDB();
-        createIndicesForVertexKeys();
-        // todo - create Edge Cardinality Constraints
-        LOG.info("Initialized graph db: {}", graph);
-
-        vertexIndexedKeys = getIndexableGraph().getIndexedKeys(Vertex.class);
-        LOG.info("Init vertex property keys: {}", vertexIndexedKeys);
-
-        edgeIndexedKeys = getIndexableGraph().getIndexedKeys(Edge.class);
-        LOG.info("Init edge property keys: {}", edgeIndexedKeys);
-
-        boolean preserveHistory = Boolean.valueOf(StartupProperties.get().getProperty(
-                "falcon.graph.preserve.history", "false"));
-        entityGraphBuilder = new EntityRelationshipGraphBuilder(graph, preserveHistory);
-        instanceGraphBuilder = new InstanceRelationshipGraphBuilder(graph, preserveHistory);
-
-        ConfigurationStore.get().registerListener(this);
-        Services.get().<WorkflowJobEndNotificationService>getService(
-                WorkflowJobEndNotificationService.SERVICE_NAME).registerListener(this);
-        try {
-            transactionRetries = Integer.parseInt(StartupProperties.get().getProperty(
-                    "falcon.graph.transaction.retry.count", "3"));
-            transactionRetryDelayInMillis = Long.parseLong(StartupProperties.get().getProperty(
-                    "falcon.graph.transaction.retry.delay", "5"));
-        } catch (NumberFormatException e) {
-            throw new FalconException("Invalid values for graph transaction retry delay/count " + e);
-        }
-    }
-
-    protected Graph initializeGraphDB() {
-        LOG.info("Initializing graph db");
-
-        Configuration graphConfig = getConfiguration();
-        return GraphFactory.open(graphConfig);
-    }
-
-    public static Configuration getConfiguration() {
-        Configuration graphConfig = new BaseConfiguration();
-
-        Properties configProperties = StartupProperties.get();
-        for (Map.Entry entry : configProperties.entrySet()) {
-            String name = (String) entry.getKey();
-            if (name.startsWith(FALCON_PREFIX)) {
-                String value = (String) entry.getValue();
-                name = name.substring(FALCON_PREFIX.length());
-                graphConfig.setProperty(name, value);
-            }
-        }
-
-        return graphConfig;
-    }
-
-    /**
-     * This unfortunately requires a handle to the Titan implementation since
-     * com.tinkerpop.blueprints.KeyIndexableGraph#createKeyIndex does not create an index.
-     */
-    protected void createIndicesForVertexKeys() {
-        if (!((KeyIndexableGraph) graph).getIndexedKeys(Vertex.class).isEmpty()) {
-            LOG.info("Indexes already exist for graph");
-            return;
-        }
-
-        LOG.info("Indexes does not exist, Creating indexes for graph");
-        // todo - externalize this
-        makeNameKeyIndex();
-        makeKeyIndex(RelationshipProperty.TYPE.getName());
-        makeKeyIndex(RelationshipProperty.TIMESTAMP.getName());
-        makeKeyIndex(RelationshipProperty.VERSION.getName());
-    }
-
-    private void makeNameKeyIndex() {
-        getTitanGraph().makeKey(RelationshipProperty.NAME.getName())
-                .dataType(String.class)
-                .indexed(Vertex.class)
-                .indexed(Edge.class)
-                // .unique() todo this ought to be unique?
-                .make();
-        getTitanGraph().commit();
-    }
-
-    private void makeKeyIndex(String key) {
-        getTitanGraph().makeKey(key)
-                .dataType(String.class)
-                .indexed(Vertex.class)
-                .make();
-        getTitanGraph().commit();
-    }
-
-    public Graph getGraph() {
-        return graph;
-    }
-
-    public KeyIndexableGraph getIndexableGraph() {
-        return (KeyIndexableGraph) graph;
-    }
-
-    public TransactionalGraph getTransactionalGraph() {
-        return (TransactionalGraph) graph;
-    }
-
-    public TitanBlueprintsGraph getTitanGraph() {
-        return (TitanBlueprintsGraph) graph;
-    }
-
-    public Set<String> getVertexIndexedKeys() {
-        return vertexIndexedKeys;
-    }
-
-    public Set<String> getEdgeIndexedKeys() {
-        return edgeIndexedKeys;
-    }
-
-    @Override
-    public void destroy() throws FalconException {
-        Services.get().<WorkflowJobEndNotificationService>getService(
-                WorkflowJobEndNotificationService.SERVICE_NAME).unregisterListener(this);
-
-        LOG.info("Shutting down graph db");
-        graph.shutdown();
-    }
-
-    @Override
-    public void onAdd(final Entity entity) throws FalconException {
-        EntityType entityType = entity.getEntityType();
-        LOG.info("Adding lineage for entity: {}, type: {}", entity.getName(), entityType);
-        try {
-            new TransactionRetryHelper.Builder<Void>(getTransactionalGraph())
-                    .perform(new TransactionWork<Void>() {
-                        @Override
-                        public Void execute(TransactionalGraph transactionalGraph) throws Exception {
-                            entityGraphBuilder.addEntity(entity);
-                            transactionalGraph.commit();
-                            return null;
-                        }
-                    }).build().exponentialBackoff(transactionRetries, transactionRetryDelayInMillis);
-
-        } catch (Exception e) {
-            getTransactionalGraph().rollback();
-            throw new FalconException(e);
-        }
-    }
-
-    @Override
-    public void onRemove(Entity entity) throws FalconException {
-        // do nothing, we'd leave the deleted entities as-is for historical purposes
-        // should we mark 'em as deleted?
-    }
-
-    @Override
-    public void onChange(final Entity oldEntity, final Entity newEntity) throws FalconException {
-        EntityType entityType = newEntity.getEntityType();
-        LOG.info("Updating lineage for entity: {}, type: {}", newEntity.getName(), entityType);
-        try {
-            new TransactionRetryHelper.Builder<Void>(getTransactionalGraph())
-                    .perform(new TransactionWork<Void>() {
-                        @Override
-                        public Void execute(TransactionalGraph transactionalGraph) throws Exception {
-                            entityGraphBuilder.updateEntity(oldEntity, newEntity);
-                            transactionalGraph.commit();
-                            return null;
-                        }
-                    }).build().exponentialBackoff(transactionRetries, transactionRetryDelayInMillis);
-
-        } catch (Exception e) {
-            getTransactionalGraph().rollback();
-            throw new FalconException(e);
-        }
-    }
-
-    @Override
-    public void onReload(Entity entity) throws FalconException {
-        onAdd(entity);
-    }
-
-    @Override
-    public void onSuccess(final WorkflowExecutionContext context) throws FalconException {
-        LOG.info("Adding lineage for context {}", context);
-        try {
-            new TransactionRetryHelper.Builder<Void>(getTransactionalGraph())
-                    .perform(new TransactionWork<Void>() {
-                        @Override
-                        public Void execute(TransactionalGraph transactionalGraph) throws Exception {
-                            onSuccessfulExecution(context);
-                            transactionalGraph.commit();
-                            return null;
-                        }
-                    }).build().exponentialBackoff(transactionRetries, transactionRetryDelayInMillis);
-        } catch (Exception e) {
-            getTransactionalGraph().rollback();
-            throw new FalconException(e);
-        }
-    }
-
-    private void onSuccessfulExecution(final WorkflowExecutionContext context) throws FalconException {
-        WorkflowExecutionContext.EntityOperations entityOperation = context.getOperation();
-        switch (entityOperation) {
-        case GENERATE:
-            onProcessInstanceExecuted(context);
-            break;
-        case REPLICATE:
-            onFeedInstanceReplicated(context);
-            break;
-        case DELETE:
-            onFeedInstanceEvicted(context);
-            break;
-        case IMPORT:
-            onFeedInstanceImported(context);
-            break;
-        default:
-            throw new IllegalArgumentException("Invalid EntityOperation - " + entityOperation);
-        }
-    }
-
-    @Override
-    public void onFailure(WorkflowExecutionContext context) throws FalconException {
-        // do nothing since lineage is only recorded for successful workflow
-    }
-
-    @Override
-    public void onStart(WorkflowExecutionContext context) throws FalconException {
-        // Do nothing
-    }
-
-    @Override
-    public void onSuspend(WorkflowExecutionContext context) throws FalconException {
-        // Do nothing
-    }
-
-    @Override
-    public void onWait(WorkflowExecutionContext context) throws FalconException {
-        // TBD
-    }
-
-
-    private void onProcessInstanceExecuted(WorkflowExecutionContext context) throws FalconException {
-        Vertex processInstance = instanceGraphBuilder.addProcessInstance(context);
-        instanceGraphBuilder.addOutputFeedInstances(context, processInstance);
-        instanceGraphBuilder.addInputFeedInstances(context, processInstance);
-    }
-
-    private void onFeedInstanceReplicated(WorkflowExecutionContext context) throws FalconException {
-        LOG.info("Adding replicated feed instance: {}", context.getNominalTimeAsISO8601());
-        instanceGraphBuilder.addReplicatedInstance(context);
-    }
-
-    private void onFeedInstanceEvicted(WorkflowExecutionContext context) throws FalconException {
-        LOG.info("Adding evicted feed instance: {}", context.getNominalTimeAsISO8601());
-        instanceGraphBuilder.addEvictedInstance(context);
-    }
-    private void onFeedInstanceImported(WorkflowExecutionContext context) throws FalconException {
-        LOG.info("Adding imported feed instance: {}", context.getNominalTimeAsISO8601());
-        instanceGraphBuilder.addImportedInstance(context);
-    }
-}
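
The listener above funnels every graph mutation through TransactionRetryHelper, retrying with
exponential backoff and rolling back the transactional graph only after all attempts fail. A rough
standalone sketch of that commit/retry/rollback shape (plain Java; not the helper's actual
implementation, and the work/rollback callbacks are hypothetical stand-ins):

    import java.util.concurrent.Callable;

    public final class RetryingCommitSketch {

        /** Retries work with exponentially growing delays; rolls back and rethrows if every attempt fails. */
        static <T> T commitWithRetries(Callable<T> work, Runnable rollback,
                                       int maxRetries, long initialDelayMillis) throws Exception {
            long delay = initialDelayMillis;
            Exception last = null;
            for (int attempt = 0; attempt < Math.max(1, maxRetries); attempt++) {
                try {
                    return work.call();          // the work is expected to commit before returning
                } catch (Exception e) {
                    last = e;
                    Thread.sleep(delay);
                    delay *= 2;                  // exponential backoff between attempts
                }
            }
            rollback.run();                      // mirrors the rollback in the outer catch above
            throw last;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(commitWithRetries(
                    () -> "committed",           // stand-in for updateEntity(...) + commit()
                    () -> System.out.println("rolled back"),
                    3, 100L));
        }
    }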

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/RelationshipGraphBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/RelationshipGraphBuilder.java b/common/src/main/java/org/apache/falcon/metadata/RelationshipGraphBuilder.java
deleted file mode 100644
index 0c3fcee..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/RelationshipGraphBuilder.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-import com.tinkerpop.blueprints.Direction;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.GraphQuery;
-import com.tinkerpop.blueprints.Vertex;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Date;
-import java.util.Iterator;
-
-/**
- * Base class for Metadata relationship mapping helper.
- */
-public abstract class RelationshipGraphBuilder {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RelationshipGraphBuilder.class);
-
-    /**
-     * A blueprints graph.
-     */
-    private final Graph graph;
-
-    /**
-     * If enabled, preserves the history of tags and groups for instances; otherwise they are
-     * only available for entities.
-     */
-    private final boolean preserveHistory;
-
-    protected RelationshipGraphBuilder(Graph graph, boolean preserveHistory) {
-        this.graph = graph;
-        this.preserveHistory = preserveHistory;
-    }
-
-    public Graph getGraph() {
-        return graph;
-    }
-
-    protected boolean isPreserveHistory() {
-        return preserveHistory;
-    }
-
-    public Vertex addVertex(String name, RelationshipType type) {
-        Vertex vertex = findVertex(name, type);
-        if (vertex != null) {
-            LOG.debug("Found an existing vertex for: name={}, type={}", name, type);
-            return vertex;
-        }
-
-        return createVertex(name, type);
-    }
-
-    protected Vertex addVertex(String name, RelationshipType type, long timestamp) {
-        Vertex vertex = findVertex(name, type);
-        if (vertex != null) {
-            LOG.debug("Found an existing vertex for: name={}, type={}", name, type);
-            return vertex;
-        }
-
-        return createVertex(name, type, timestamp);
-    }
-
-    protected Vertex findVertex(String name, RelationshipType type) {
-        LOG.debug("Finding vertex for: name={}, type={}", name, type);
-
-        GraphQuery query = graph.query()
-                .has(RelationshipProperty.NAME.getName(), name)
-                .has(RelationshipProperty.TYPE.getName(), type.getName());
-        Iterator<Vertex> results = query.vertices().iterator();
-        return results.hasNext() ? results.next() : null;  // returning one since name is unique
-    }
-
-    protected Vertex createVertex(String name, RelationshipType type) {
-        return createVertex(name, type, System.currentTimeMillis());
-    }
-
-    protected Vertex createVertex(String name, RelationshipType type, long timestamp) {
-        LOG.debug("Creating a new vertex for: name={}, type={}", name, type);
-
-        Vertex vertex = graph.addVertex(null);
-        vertex.setProperty(RelationshipProperty.NAME.getName(), name);
-        vertex.setProperty(RelationshipProperty.TYPE.getName(), type.getName());
-        vertex.setProperty(RelationshipProperty.TIMESTAMP.getName(), timestamp);
-
-        return vertex;
-    }
-
-    protected Edge addEdge(Vertex fromVertex, Vertex toVertex, String edgeLabel) {
-        return addEdge(fromVertex, toVertex, edgeLabel, null);
-    }
-
-    protected Edge addEdge(Vertex fromVertex, Vertex toVertex,
-                           String edgeLabel, String timestamp) {
-        Edge edge = findEdge(fromVertex, toVertex, edgeLabel);
-
-        Edge edgeToVertex = edge != null ? edge : fromVertex.addEdge(edgeLabel, toVertex);
-        if (timestamp != null) {
-            edgeToVertex.setProperty(RelationshipProperty.TIMESTAMP.getName(), timestamp);
-        }
-
-        return edgeToVertex;
-    }
-
-    protected void removeEdge(Vertex fromVertex, Vertex toVertex, String edgeLabel) {
-        Edge edge = findEdge(fromVertex, toVertex, edgeLabel);
-        if (edge != null) {
-            getGraph().removeEdge(edge);
-        }
-    }
-
-    protected void removeEdge(Vertex fromVertex, Object toVertexName, String edgeLabel) {
-        Edge edge = findEdge(fromVertex, toVertexName, edgeLabel);
-        if (edge != null) {
-            getGraph().removeEdge(edge);
-        }
-    }
-
-    protected Edge findEdge(Vertex fromVertex, Vertex toVertex, String edgeLabel) {
-        return findEdge(fromVertex, toVertex.getProperty(RelationshipProperty.NAME.getName()), edgeLabel);
-    }
-
-    protected Edge findEdge(Vertex fromVertex, Object toVertexName, String edgeLabel) {
-        Edge edgeToFind = null;
-        for (Edge edge : fromVertex.getEdges(Direction.OUT, edgeLabel)) {
-            if (edge.getVertex(Direction.IN).getProperty(RelationshipProperty.NAME.getName()).equals(toVertexName)) {
-                edgeToFind = edge;
-                break;
-            }
-        }
-
-        return edgeToFind;
-    }
-
-    protected void addUserRelation(Vertex fromVertex) {
-        addUserRelation(fromVertex, RelationshipLabel.USER.getName());
-    }
-
-    protected void addUserRelation(Vertex fromVertex, String edgeLabel) {
-        Vertex relationToUserVertex = addVertex(CurrentUser.getUser(), RelationshipType.USER);
-        addEdge(fromVertex, relationToUserVertex, edgeLabel);
-    }
-
-    protected void addDataClassification(String classification, Vertex entityVertex) {
-        if (classification == null || classification.length() == 0) {
-            return;
-        }
-
-        String[] tags = classification.split(",");
-        for (String tag : tags) {
-            int index = tag.indexOf("=");
-            String tagKey = tag.substring(0, index);
-            String tagValue = tag.substring(index + 1, tag.length());
-
-            Vertex tagValueVertex = addVertex(tagValue, RelationshipType.TAGS);
-            addEdge(entityVertex, tagValueVertex, tagKey);
-        }
-    }
-
-    protected void addGroups(String groups, Vertex fromVertex) {
-        addCSVTags(groups, fromVertex, RelationshipType.GROUPS, RelationshipLabel.GROUPS);
-    }
-
-    protected void addPipelines(String pipelines, Vertex fromVertex) {
-        addCSVTags(pipelines, fromVertex, RelationshipType.PIPELINES, RelationshipLabel.PIPELINES);
-    }
-
-    protected void addProcessFeedEdge(Vertex processVertex, Vertex feedVertex,
-                                      RelationshipLabel edgeLabel) {
-        if (edgeLabel == RelationshipLabel.FEED_PROCESS_EDGE) {
-            addEdge(feedVertex, processVertex, edgeLabel.getName());
-        } else {
-            addEdge(processVertex, feedVertex, edgeLabel.getName());
-        }
-    }
-
-    protected String getCurrentTimeStamp() {
-        return SchemaHelper.formatDateUTC(new Date());
-    }
-
-    /**
-     * Adds comma separated values as tags.
-     *
-     * @param csvTags           comma separated values.
-     * @param fromVertex        from vertex.
-     * @param relationshipType  vertex type.
-     * @param edgeLabel         edge label.
-     */
-    private void addCSVTags(String csvTags, Vertex fromVertex,
-                            RelationshipType relationshipType, RelationshipLabel edgeLabel) {
-        if (StringUtils.isEmpty(csvTags)) {
-            return;
-        }
-
-        String[] tags = csvTags.split(",");
-        for (String tag : tags) {
-            Vertex vertex = addVertex(tag, relationshipType);
-            addEdge(fromVertex, vertex, edgeLabel.getName());
-        }
-    }
-}
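
A small sketch of how a subclass might drive the protected helpers above against an in-memory
Blueprints TinkerGraph. FEED_ENTITY and CLUSTER_ENTITY are assumed names of constants in the
accompanying RelationshipType enum (that enum is not part of this hunk); the edge label comes from
RelationshipLabel below:

    import com.tinkerpop.blueprints.Vertex;
    import com.tinkerpop.blueprints.impls.tg.TinkerGraph;
    import org.apache.falcon.metadata.RelationshipGraphBuilder;
    import org.apache.falcon.metadata.RelationshipLabel;
    import org.apache.falcon.metadata.RelationshipType;
    import org.apache.falcon.security.CurrentUser;

    public class LineageGraphSketch extends RelationshipGraphBuilder {

        public LineageGraphSketch() {
            super(new TinkerGraph(), false);    // in-memory graph, history not preserved
        }

        /** Records that a feed is stored in a cluster and owned by the current user. */
        public void recordFeed(String feedName, String clusterName) {
            Vertex feed = addVertex(feedName, RelationshipType.FEED_ENTITY);          // assumed constant name
            Vertex cluster = addVertex(clusterName, RelationshipType.CLUSTER_ENTITY); // assumed constant name
            addEdge(feed, cluster, RelationshipLabel.FEED_CLUSTER_EDGE.getName());    // "stored-in"
            addUserRelation(feed);   // "owned-by" edge; requires CurrentUser.authenticate(...) first
        }

        public static void main(String[] args) {
            CurrentUser.authenticate("falcon-dev");   // illustrative user name
            new LineageGraphSketch().recordFeed("clicks-feed", "primary-cluster");
        }
    }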

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/RelationshipLabel.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/RelationshipLabel.java b/common/src/main/java/org/apache/falcon/metadata/RelationshipLabel.java
deleted file mode 100644
index 6d4bf46..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/RelationshipLabel.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-/**
- * Enumerates Relationship edge labels.
- */
-public enum RelationshipLabel {
-
-    // entity edge labels
-    FEED_CLUSTER_EDGE("stored-in"),
-    PROCESS_CLUSTER_EDGE("runs-on"),
-    FEED_PROCESS_EDGE("input"),
-    PROCESS_FEED_EDGE("output"),
-    DATASOURCE_IMPORT_EDGE("import"),
-
-    // instance edge labels
-    INSTANCE_ENTITY_EDGE("instance-of"),
-
-    // edge labels
-    CLUSTER_COLO("collocated"),
-    USER("owned-by"),
-    GROUPS("grouped-as"),
-    PIPELINES("pipeline"),
-
-    // replication labels
-    FEED_CLUSTER_REPLICATED_EDGE("replicated-to"),
-
-    // eviction labels
-    FEED_CLUSTER_EVICTED_EDGE("evicted-from");
-
-    private final String name;
-
-    RelationshipLabel(String name) {
-        this.name = name;
-    }
-
-    public String getName() {
-        return name;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/RelationshipProperty.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/RelationshipProperty.java b/common/src/main/java/org/apache/falcon/metadata/RelationshipProperty.java
deleted file mode 100644
index ff437d9..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/RelationshipProperty.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-/**
- * Enumerates Relationship property keys.
- */
-public enum RelationshipProperty {
-
-    // vertex property keys - indexed
-    NAME("name"),
-    TYPE("type"),
-    TIMESTAMP("timestamp"),
-    VERSION("version"),
-
-    // workflow properties
-    USER_WORKFLOW_ENGINE("userWorkflowEngine", "user workflow engine type"),
-    USER_WORKFLOW_NAME("userWorkflowName", "user workflow name"),
-    USER_WORKFLOW_VERSION("userWorkflowVersion", "user workflow version"),
-
-    // workflow instance properties
-    WORKFLOW_ID("workflowId", "current workflow-id of the instance"),
-    RUN_ID("runId", "current run-id of the instance"),
-    STATUS("status", "status of the user workflow instance"),
-    WF_ENGINE_URL("workflowEngineUrl", "url of workflow engine server, ex: oozie"),
-    USER_SUBFLOW_ID("subflowId", "external id of user workflow");
-
-
-    private final String name;
-    private final String description;
-
-    RelationshipProperty(String name) {
-        this(name, name);
-    }
-
-    RelationshipProperty(String name, String description) {
-        this.name = name;
-        this.description = description;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/retention/EvictedInstanceSerDe.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/retention/EvictedInstanceSerDe.java b/common/src/main/java/org/apache/falcon/retention/EvictedInstanceSerDe.java
deleted file mode 100644
index b4d46c4..0000000
--- a/common/src/main/java/org/apache/falcon/retention/EvictedInstanceSerDe.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.retention;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * Utility class for serializing and deserializing the evicted instance paths.
- */
-
-public final class EvictedInstanceSerDe {
-
-    private static final Logger LOG = LoggerFactory.getLogger(EvictedInstanceSerDe.class);
-
-    public static final String INSTANCEPATH_PREFIX = "instancePaths=";
-    public static final String INSTANCEPATH_SEPARATOR = ",";
-
-
-    private EvictedInstanceSerDe() {}
-
-    /**
-     * This method serializes the evicted instances to a file in logs dir for a given feed.
-     * @see org.apache.falcon.retention.FeedEvictor
-     *
-     * *Note:* This is executed within the map task for the evictor action
-     *
-     * @param fileSystem file system handle
-     * @param logFilePath       File path to serialize the instances to
-     * @param instances  list of instances, comma separated
-     * @throws IOException
-     */
-    public static void serializeEvictedInstancePaths(final FileSystem fileSystem,
-                                                     final Path logFilePath,
-                                                     StringBuffer instances) throws IOException {
-        LOG.info("Writing deleted instances {} to path {}", instances, logFilePath);
-        OutputStream out = null;
-        try {
-            out = fileSystem.create(logFilePath);
-            instances.insert(0, INSTANCEPATH_PREFIX); // add the prefix
-            out.write(instances.toString().getBytes());
-
-            // To make sure log cleaning service can delete this file
-            FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
-            fileSystem.setPermission(logFilePath, permission);
-        } finally {
-            if (out != null) {
-                out.close();
-            }
-        }
-
-        if (LOG.isDebugEnabled()) {
-            logEvictedInstancePaths(fileSystem, logFilePath);
-        }
-    }
-
-    private static void logEvictedInstancePaths(final FileSystem fs,
-                                                final Path outPath) throws IOException {
-        ByteArrayOutputStream writer = new ByteArrayOutputStream();
-        InputStream instance = fs.open(outPath);
-        IOUtils.copyBytes(instance, writer, 4096, true);
-        LOG.debug("Instance Paths copied to {}", outPath);
-        LOG.debug("Written {}", writer);
-    }
-
-    /**
-     * This method deserializes the evicted instances from a log file on hdfs.
-     * @see org.apache.falcon.messaging.JMSMessageProducer
-     * *Note:* This is executed within the Falcon server
-     *
-     * @param fileSystem file system handle
-     * @param logFile    File path to deserialize the instances from
-     * @return list of instances, comma separated
-     * @throws IOException
-     */
-    public static String[] deserializeEvictedInstancePaths(final FileSystem fileSystem,
-                                                           final Path logFile) throws IOException {
-        ByteArrayOutputStream writer = new ByteArrayOutputStream();
-        InputStream instance = fileSystem.open(logFile);
-        IOUtils.copyBytes(instance, writer, 4096, true);
-        String[] instancePaths = writer.toString().split(INSTANCEPATH_PREFIX);
-
-        if (instancePaths.length <= 1) {
-            LOG.info("Returning 0 instance paths for feed ");
-            return new String[0];
-        } else {
-            LOG.info("Returning instance paths for feed {}", instancePaths[1]);
-            return instancePaths[1].split(INSTANCEPATH_SEPARATOR);
-        }
-    }
-}
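
A round-trip usage sketch for the SerDe above, assuming a local HDFS-compatible FileSystem and
illustrative instance paths (neither the log path nor the data paths are ones Falcon itself uses):

    import org.apache.falcon.retention.EvictedInstanceSerDe;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EvictionLogRoundTrip {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.getLocal(new Configuration());
            Path logFile = new Path("/tmp/falcon-evicted-instances.log");  // illustrative path

            // Comma-separated instance paths; serialize prepends the "instancePaths=" prefix.
            StringBuffer instances = new StringBuffer(
                    "/data/clicks/2016/03/01/00,/data/clicks/2016/03/01/01");
            EvictedInstanceSerDe.serializeEvictedInstancePaths(fs, logFile, instances);

            for (String path : EvictedInstanceSerDe.deserializeEvictedInstancePaths(fs, logFile)) {
                System.out.println(path);   // prints the two instance paths back
            }
        }
    }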

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/retention/EvictionHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/retention/EvictionHelper.java b/common/src/main/java/org/apache/falcon/retention/EvictionHelper.java
deleted file mode 100644
index 1457b06..0000000
--- a/common/src/main/java/org/apache/falcon/retention/EvictionHelper.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.retention;
-
-import org.apache.commons.el.ExpressionEvaluatorImpl;
-import org.apache.falcon.Pair;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.jsp.el.ELException;
-import javax.servlet.jsp.el.ExpressionEvaluator;
-import java.util.Date;
-
-/**
- * Utilities for feed eviction.
- */
-public final class EvictionHelper {
-
-    private static final Logger LOG = LoggerFactory.getLogger(EvictionHelper.class);
-
-    private static final ExpressionEvaluator EVALUATOR = new ExpressionEvaluatorImpl();
-    private static final ExpressionHelper RESOLVER = ExpressionHelper.get();
-
-    private EvictionHelper(){}
-
-    public static Pair<Date, Date> getDateRange(String period) throws ELException {
-        Long duration = (Long) EVALUATOR.evaluate("${" + period + "}",
-                Long.class, RESOLVER, RESOLVER);
-        Date end = new Date();
-        Date start = new Date(end.getTime() - duration);
-        return Pair.of(start, end);
-    }
-
-}
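
getDateRange evaluates a retention expression (the same limit="hours(6)" style used in feed
retention) into a window from now minus the duration up to now. A minimal usage sketch:

    import org.apache.falcon.Pair;
    import org.apache.falcon.retention.EvictionHelper;

    import java.util.Date;

    public class RetentionWindowSketch {
        public static void main(String[] args) throws Exception {
            Pair<Date, Date> window = EvictionHelper.getDateRange("hours(6)");
            System.out.println(window);   // the pair holds (now - 6 hours, now)
        }
    }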

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java b/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
deleted file mode 100644
index f7b2155..0000000
--- a/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang.Validate;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.aspect.GenericAlert;
-import org.apache.falcon.service.FalconService;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.Date;
-import java.util.Properties;
-import java.util.Timer;
-import java.util.TimerTask;
-
-
-/**
- * Authentication Service at startup that initializes the authentication credentials
- * based on authentication type. If Kerberos is enabled, it logs in the user with the keytab.
- */
-public class AuthenticationInitializationService implements FalconService {
-
-    private static final Logger LOG = LoggerFactory.getLogger(AuthenticationInitializationService.class);
-
-    /**
-     * Constant for the configuration property that indicates the prefix.
-     */
-    protected static final String CONFIG_PREFIX = "falcon.service.authentication.";
-
-    /**
-     * Constant for the configuration property that indicates the keytab file path.
-     */
-    protected static final String KERBEROS_KEYTAB = CONFIG_PREFIX + KerberosAuthenticationHandler.KEYTAB;
-
-    /**
-     * Constant for the configuration property that indicates the kerberos principal.
-     */
-    protected static final String KERBEROS_PRINCIPAL = CONFIG_PREFIX + KerberosAuthenticationHandler.PRINCIPAL;
-
-    /**
-     * Constant for the configuration property that indicates the authentication token validity time in seconds.
-     */
-    protected static final String AUTH_TOKEN_VALIDITY_SECONDS = CONFIG_PREFIX + "token.validity";
-
-    private Timer timer = new Timer();
-    private static final String SERVICE_NAME = "Authentication initialization service";
-
-    @Override
-    public String getName() {
-        return SERVICE_NAME;
-    }
-
-    @Override
-    public void init() throws FalconException {
-
-        if (SecurityUtil.isSecurityEnabled()) {
-            LOG.info("Falcon Kerberos Authentication Enabled!");
-            initializeKerberos();
-
-            String authTokenValidity = StartupProperties.get().getProperty(AUTH_TOKEN_VALIDITY_SECONDS);
-            long validateFrequency;
-            try {
-                validateFrequency = (StringUtils.isNotEmpty(authTokenValidity))
-                        ? Long.parseLong(authTokenValidity) : 86400;
-            } catch (NumberFormatException nfe) {
-                throw new FalconException("Invalid value provided for startup property \""
-                        + AUTH_TOKEN_VALIDITY_SECONDS + "\", please provide a valid long number", nfe);
-            }
-            timer.schedule(new TokenValidationThread(), 0, validateFrequency*1000);
-        } else {
-            LOG.info("Falcon Simple Authentication Enabled!");
-            Configuration ugiConf = new Configuration();
-            ugiConf.set("hadoop.security.authentication", "simple");
-            UserGroupInformation.setConfiguration(ugiConf);
-        }
-    }
-
-    protected static void initializeKerberos() throws FalconException {
-        try {
-            Properties configuration = StartupProperties.get();
-            String principal = configuration.getProperty(KERBEROS_PRINCIPAL);
-            Validate.notEmpty(principal,
-                    "Missing required configuration property: " + KERBEROS_PRINCIPAL);
-            principal = org.apache.hadoop.security.SecurityUtil.getServerPrincipal(
-                    principal, SecurityUtil.getLocalHostName());
-
-            String keytabFilePath = configuration.getProperty(KERBEROS_KEYTAB);
-            Validate.notEmpty(keytabFilePath,
-                    "Missing required configuration property: " + KERBEROS_KEYTAB);
-            checkIsReadable(keytabFilePath);
-
-            Configuration conf = new Configuration();
-            conf.set("hadoop.security.authentication", "kerberos");
-
-            UserGroupInformation.setConfiguration(conf);
-            UserGroupInformation.loginUserFromKeytab(principal, keytabFilePath);
-
-            LOG.info("Got Kerberos ticket, keytab: {}, Falcon principal: {}", keytabFilePath, principal);
-        } catch (Exception ex) {
-            throw new FalconException("Could not initialize " + SERVICE_NAME
-                    + ": " + ex.getMessage(), ex);
-        }
-    }
-
-    private static void checkIsReadable(String keytabFilePath) {
-        File keytabFile = new File(keytabFilePath);
-        if (!keytabFile.exists()) {
-            throw new IllegalArgumentException("The keytab file does not exist! " + keytabFilePath);
-        }
-
-        if (!keytabFile.isFile()) {
-            throw new IllegalArgumentException("The keytab file cannot be a directory! " + keytabFilePath);
-        }
-
-        if (!keytabFile.canRead()) {
-            throw new IllegalArgumentException("The keytab file is not readable! " + keytabFilePath);
-        }
-    }
-
-    @Override
-    public void destroy() throws FalconException {
-        timer.cancel();
-    }
-
-    private static class TokenValidationThread extends TimerTask {
-        @Override
-        public void run() {
-            try {
-                LOG.info("Validating Auth Token: {}", new Date());
-                initializeKerberos();
-            } catch (Throwable t) {
-                LOG.error("Error in Auth Token Validation task: ", t);
-                GenericAlert.initializeKerberosFailed(
-                        "Exception in Auth Token Validation : ", t);
-            }
-        }
-    }
-
-
-}
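
When Kerberos is enabled, the service above reads the principal, keytab and token validity from
startup.properties. Illustrative values only; the key names follow the CONFIG_PREFIX constants
above, and the *.-prefixed form follows the usual startup.properties convention:

    *.falcon.service.authentication.kerberos.principal=falcon/_HOST@EXAMPLE.COM
    *.falcon.service.authentication.kerberos.keytab=/etc/security/keytabs/falcon.service.keytab
    *.falcon.service.authentication.token.validity=86400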

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/security/AuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/AuthorizationProvider.java b/common/src/main/java/org/apache/falcon/security/AuthorizationProvider.java
deleted file mode 100644
index a6f2564..0000000
--- a/common/src/main/java/org/apache/falcon/security/AuthorizationProvider.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.falcon.entity.EntityNotRegisteredException;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-
-import java.io.IOException;
-
-/**
- * An interface for authorizing user against an entity operation.
- */
-public interface AuthorizationProvider {
-
-    /**
-     * Check if the authenticated user is a super user.
-     *
-     * @param authenticatedUGI   proxy ugi for the authenticated user
-     * @return true if super user, else false
-     */
-    boolean isSuperUser(UserGroupInformation authenticatedUGI);
-
-    /**
-     * Checks if authenticated user can proxy the entity acl owner.
-     *
-     * @param authenticatedUGI  proxy ugi for the authenticated user.
-     * @param aclOwner          entity ACL Owner.
-     * @param aclGroup          entity ACL group.
-     * @throws IOException
-     */
-    boolean shouldProxy(UserGroupInformation authenticatedUGI,
-                        String aclOwner, String aclGroup) throws IOException;
-
-    /**
-     * Determines if the authenticated user is authorized to execute the action on the resource,
-     * which is typically a REST resource path.
-     * Throws an exception if not authorized.
-     *
-     * @param resource   api resource, admin, entities or instance
-     * @param action     action being authorized on resource and entity if applicable
-     * @param entityType entity type in question, not for admin resource
-     * @param entityName entity name in question, not for admin resource
-     * @param authenticatedUGI   proxy ugi for the authenticated user
-     * @throws AuthorizationException
-     */
-    void authorizeResource(String resource,
-                           String action,
-                           String entityType,
-                           String entityName,
-                           UserGroupInformation authenticatedUGI)
-        throws AuthorizationException, EntityNotRegisteredException;
-
-    /**
-     * Determines if the authenticated user is authorized to execute the action on the entity.
-     * Throws an exception if not authorized.
-     *
-     * @param entityName entity name in question, applicable for entities and instance resource
-     * @param entityType entity type in question, applicable for entities and instance resource
-     * @param acl        entity ACL
-     * @param action     action being authorized on resource and entity if applicable
-     * @param authenticatedUGI   proxy ugi for the authenticated user
-     * @throws AuthorizationException
-     */
-    void authorizeEntity(String entityName, String entityType,
-                         AccessControlList acl, String action,
-                         UserGroupInformation authenticatedUGI) throws AuthorizationException;
-}
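
One hypothetical way to implement this interface (not Falcon's shipped provider): a single admin
short name is the super user, and only the super user or the ACL owner may act on an entity. The
class name, admin user and messages below are illustrative:

    import java.io.IOException;

    import org.apache.falcon.entity.EntityNotRegisteredException;
    import org.apache.falcon.entity.v0.AccessControlList;
    import org.apache.falcon.security.AuthorizationProvider;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AuthorizationException;

    public class OwnerOnlyAuthorizationProvider implements AuthorizationProvider {

        private static final String ADMIN = "falcon";   // assumed admin short name

        @Override
        public boolean isSuperUser(UserGroupInformation authenticatedUGI) {
            return ADMIN.equals(authenticatedUGI.getShortUserName());
        }

        @Override
        public boolean shouldProxy(UserGroupInformation authenticatedUGI,
                                   String aclOwner, String aclGroup) throws IOException {
            return isSuperUser(authenticatedUGI)
                    || authenticatedUGI.getShortUserName().equals(aclOwner);
        }

        @Override
        public void authorizeResource(String resource, String action, String entityType,
                                      String entityName, UserGroupInformation authenticatedUGI)
            throws AuthorizationException, EntityNotRegisteredException {
            if ("admin".equals(resource) && !isSuperUser(authenticatedUGI)) {
                throw new AuthorizationException("Only the super user may access the admin resource");
            }
        }

        @Override
        public void authorizeEntity(String entityName, String entityType, AccessControlList acl,
                                    String action, UserGroupInformation authenticatedUGI)
            throws AuthorizationException {
            if (!isSuperUser(authenticatedUGI)
                    && !authenticatedUGI.getShortUserName().equals(acl.getOwner())) {
                throw new AuthorizationException("User " + authenticatedUGI.getShortUserName()
                        + " may not " + action + " " + entityType + "/" + entityName);
            }
        }
    }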

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/security/CredentialProviderHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/CredentialProviderHelper.java b/common/src/main/java/org/apache/falcon/security/CredentialProviderHelper.java
deleted file mode 100644
index fc4f745..0000000
--- a/common/src/main/java/org/apache/falcon/security/CredentialProviderHelper.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-/**
- * Helper class for Hadoop credential provider functionality. Reflection is used to avoid
- * directly referencing the classes and methods, so that no hard version dependency is
- * introduced, as the Hadoop credential provider API is only available in 2.6.0 and later.
- */
-
-public final class CredentialProviderHelper {
-
-    private static final Logger LOG = LoggerFactory.getLogger(CredentialProviderHelper.class);
-
-    private static Class<?> clsCredProvider;
-    private static Class<?> clsCredProviderFactory;
-    private static Method methGetPassword;
-    private static Method methCreateCredEntry;
-    private static Method methFlush;
-    private static Method methGetProviders;
-
-    public static final String CREDENTIAL_PROVIDER_PATH = "hadoop.security.credential.provider.path";
-
-    static {
-        try {
-            LOG.debug("Reflecting credential provider classes and methods");
-            clsCredProvider = Class.forName("org.apache.hadoop.security.alias.CredentialProvider");
-            clsCredProviderFactory = Class.forName("org.apache.hadoop.security.alias.CredentialProviderFactory");
-            methCreateCredEntry = clsCredProvider.getMethod("createCredentialEntry", String.class, char[].class);
-            methFlush = clsCredProvider.getMethod("flush");
-            methGetPassword = Configuration.class.getMethod("getPassword", String.class);
-            methGetProviders = clsCredProviderFactory.getMethod("getProviders", new Class[] { Configuration.class });
-            LOG.debug("Found CredentialProviderFactory#getProviders");
-        } catch (ClassNotFoundException | NoSuchMethodException cnfe) {
-            LOG.debug("Ignoring exception", cnfe);
-        }
-    }
-
-    private CredentialProviderHelper() {
-
-    }
-
-    public static boolean isProviderAvailable() {
-        return !(clsCredProvider == null
-                || clsCredProviderFactory == null
-                || methCreateCredEntry == null
-                || methGetPassword == null
-                || methFlush == null);
-    }
-
-    public static String resolveAlias(Configuration conf, String alias) throws IOException {
-        try {
-            char[] cred = (char[]) methGetPassword.invoke(conf, alias);
-            if (cred == null) {
-                throw new IOException("The provided alias cannot be resolved");
-            }
-            return new String(cred);
-        } catch (InvocationTargetException ite) {
-            throw new RuntimeException("Error resolving password "
-                    + " from the credential providers ", ite.getTargetException());
-        } catch (IllegalAccessException iae) {
-            throw new RuntimeException("Error invoking the credential provider method", iae);
-        }
-    }
-}
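
A usage sketch for the helper above. The jceks store location and the alias are illustrative; the
alias must already have been provisioned in the store (for example with the hadoop credential CLI):

    import org.apache.falcon.security.CredentialProviderHelper;
    import org.apache.hadoop.conf.Configuration;

    public class AliasResolutionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set(CredentialProviderHelper.CREDENTIAL_PROVIDER_PATH,
                    "jceks://file/tmp/falcon-credentials.jceks");   // illustrative store

            if (CredentialProviderHelper.isProviderAvailable()) {
                String secret = CredentialProviderHelper.resolveAlias(conf, "db.password.alias");
                System.out.println("resolved a secret of " + secret.length() + " characters");
            } else {
                System.out.println("credential provider API not available (Hadoop < 2.6.0)");
            }
        }
    }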

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/security/CurrentUser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/CurrentUser.java b/common/src/main/java/org/apache/falcon/security/CurrentUser.java
deleted file mode 100644
index e7c1594..0000000
--- a/common/src/main/java/org/apache/falcon/security/CurrentUser.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.security;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.service.ProxyUserService;
-import org.apache.falcon.service.Services;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-/**
- * Current authenticated user via REST. Also captures the proxy user from authorized entity
- * and doles out proxied UserGroupInformation. Caches proxied users.
- */
-public final class CurrentUser {
-
-    private static final Logger LOG = LoggerFactory.getLogger(CurrentUser.class);
-    private static final Logger AUDIT = LoggerFactory.getLogger("AUDIT");
-
-    private final String authenticatedUser;
-    private String proxyUser;
-
-    private CurrentUser(String authenticatedUser) {
-        this.authenticatedUser = authenticatedUser;
-        this.proxyUser = authenticatedUser;
-    }
-
-    private static final ThreadLocal<CurrentUser> CURRENT_USER = new ThreadLocal<CurrentUser>();
-
-    /**
-     * Captures the authenticated user.
-     *
-     * @param user   authenticated user
-     */
-    public static void authenticate(final String user) {
-        if (StringUtils.isEmpty(user)) {
-            throw new IllegalStateException("Bad user name sent for authentication");
-        }
-
-        LOG.info("Logging in {}", user);
-        CurrentUser currentUser = new CurrentUser(user);
-        CURRENT_USER.set(currentUser);
-    }
-
-    /**
-     * Proxies doAs user.
-     *
-     * @param doAsUser doAs user
-     * @param proxyHost proxy host
-     * @throws IOException
-     */
-    public static void proxyDoAsUser(final String doAsUser,
-                                     final String proxyHost) throws IOException {
-        if (!isAuthenticated()) {
-            throw new IllegalStateException("Authentication not done");
-        }
-
-        String currentUser = CURRENT_USER.get().authenticatedUser;
-        if (StringUtils.isNotEmpty(doAsUser) && !doAsUser.equalsIgnoreCase(currentUser)) {
-            if (StringUtils.isEmpty(proxyHost)) {
-                throw new IllegalArgumentException("proxy host cannot be null or empty");
-            }
-            ProxyUserService proxyUserService = Services.get().getService(ProxyUserService.SERVICE_NAME);
-            try {
-                proxyUserService.validate(currentUser, proxyHost, doAsUser);
-            } catch (IOException ex) {
-                throw new RuntimeException(ex);
-            }
-
-            CurrentUser user = CURRENT_USER.get();
-            LOG.info("Authenticated user {} is proxying doAs user {} from host {}",
-                    user.authenticatedUser, doAsUser, proxyHost);
-            AUDIT.info("Authenticated user {} is proxying doAs user {} from host {}",
-                    user.authenticatedUser, doAsUser, proxyHost);
-            user.proxyUser = doAsUser;
-        }
-    }
-
-    /**
-     * Captures the entity owner if authenticated user is a super user.
-     *
-     * @param aclOwner entity acl owner
-     * @param aclGroup entity acl group
-     * @throws IOException
-     */
-    public static void proxy(final String aclOwner,
-                             final String aclGroup) throws IOException {
-        if (!isAuthenticated() || StringUtils.isEmpty(aclOwner)) {
-            throw new IllegalStateException("Authentication not done or Bad user name");
-        }
-
-        CurrentUser user = CURRENT_USER.get();
-        LOG.info("Authenticated user {} is proxying entity owner {}/{}",
-                user.authenticatedUser, aclOwner, aclGroup);
-        AUDIT.info("Authenticated user {} is proxying entity owner {}/{}",
-                user.authenticatedUser, aclOwner, aclGroup);
-        user.proxyUser = aclOwner;
-    }
-
-    /**
-     * Clears the context.
-     */
-    public static void clear() {
-        CURRENT_USER.remove();
-    }
-
-    /**
-     * Checks if the authenticate method is already called.
-     *
-     * @return true if authenticated user is set else false
-     */
-    public static boolean isAuthenticated() {
-        CurrentUser user = CURRENT_USER.get();
-        return user != null && user.authenticatedUser != null;
-    }
-
-    /**
-     * Returns authenticated user.
-     *
-     * @return logged in authenticated user.
-     */
-    public static String getAuthenticatedUser() {
-        CurrentUser user = CURRENT_USER.get();
-        if (user == null || user.authenticatedUser == null) {
-            throw new IllegalStateException("No user logged into the system");
-        } else {
-            return user.authenticatedUser;
-        }
-    }
-
-    /**
-     * Dole out a UGI object for the current authenticated user if authenticated
-     * else return current user.
-     *
-     * @return UGI object
-     * @throws java.io.IOException
-     */
-    public static UserGroupInformation getAuthenticatedUGI() throws IOException {
-        return CurrentUser.isAuthenticated()
-            ? createProxyUGI(getAuthenticatedUser()) : UserGroupInformation.getCurrentUser();
-    }
-
-    /**
-     * Returns the proxy user.
-     *
-     * @return proxy user
-     */
-    public static String getUser() {
-        CurrentUser user = CURRENT_USER.get();
-        if (user == null || user.proxyUser == null) {
-            throw new IllegalStateException("No user logged into the system");
-        } else {
-            return user.proxyUser;
-        }
-    }
-
-    private static ConcurrentMap<String, UserGroupInformation> userUgiMap =
-            new ConcurrentHashMap<String, UserGroupInformation>();
-
-    /**
-     * Create a proxy UGI object for the proxy user.
-     *
-     * @param proxyUser logged in user
-     * @return UGI object
-     * @throws IOException
-     */
-    public static UserGroupInformation createProxyUGI(String proxyUser) throws IOException {
-        UserGroupInformation proxyUgi = userUgiMap.get(proxyUser);
-        if (proxyUgi == null) {
-            // taking care of a race condition, the latest UGI will be discarded
-            proxyUgi = UserGroupInformation.createProxyUser(
-                    proxyUser, UserGroupInformation.getLoginUser());
-            userUgiMap.putIfAbsent(proxyUser, proxyUgi);
-        }
-
-        return proxyUgi;
-    }
-
-    /**
-     * Dole out a proxy UGI object for the current authenticated user if authenticated
-     * else return current user.
-     *
-     * @return UGI object
-     * @throws java.io.IOException
-     */
-    public static UserGroupInformation getProxyUGI() throws IOException {
-        return CurrentUser.isAuthenticated()
-            ? createProxyUGI(getUser()) : UserGroupInformation.getCurrentUser();
-    }
-
-    /**
-     * Gets a collection of group names the proxy user belongs to.
-     *
-     * @return group names
-     * @throws IOException
-     */
-    public static Set<String> getGroupNames() throws IOException {
-        HashSet<String> s = new HashSet<String>(Arrays.asList(getProxyUGI().getGroupNames()));
-        return Collections.unmodifiableSet(s);
-    }
-
-    /**
-     * Returns the primary group name for the proxy user.
-     *
-     * @return primary group name for the proxy user
-     */
-    public static String getPrimaryGroupName() {
-        try {
-            String[] groups = getProxyUGI().getGroupNames();
-            if (groups.length > 0) {
-                return groups[0];
-            }
-        } catch (IOException ignore) {
-            // ignored
-        }
-
-        return "unknown"; // this can only happen in tests
-    }
-}
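
A usage sketch for CurrentUser above, with illustrative user names. In the Falcon server the
authenticate call is typically made per request by the authentication layer, and clear() is
invoked when the request ends:

    import org.apache.falcon.security.CurrentUser;
    import org.apache.hadoop.security.UserGroupInformation;

    public class CurrentUserSketch {
        public static void main(String[] args) throws Exception {
            CurrentUser.authenticate("alice");      // REST-authenticated user
            CurrentUser.proxy("bob", "analytics");  // act on behalf of the entity ACL owner

            System.out.println(CurrentUser.getAuthenticatedUser());  // alice
            System.out.println(CurrentUser.getUser());               // bob (the proxy user)

            UserGroupInformation ugi = CurrentUser.getProxyUGI();
            System.out.println(ugi.getUserName());                   // bob

            CurrentUser.clear();   // always clear the thread local when done
        }
    }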


[44/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/resources/feed-0.1.xsd
----------------------------------------------------------------------
diff --git a/client/src/main/resources/feed-0.1.xsd b/client/src/main/resources/feed-0.1.xsd
deleted file mode 100644
index 77e8663..0000000
--- a/client/src/main/resources/feed-0.1.xsd
+++ /dev/null
@@ -1,575 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" attributeFormDefault="unqualified" elementFormDefault="qualified"
-           targetNamespace="uri:falcon:feed:0.1" xmlns="uri:falcon:feed:0.1"
-           xmlns:jaxb="http://java.sun.com/xml/ns/jaxb" jaxb:version="2.1">
-
-    <xs:annotation>
-        <xs:documentation>
-            Licensed to the Apache Software Foundation (ASF) under one or more
-            contributor license agreements. See the NOTICE file distributed with
-            this work for
-            additional information regarding copyright ownership.
-            The ASF licenses this file to You under the Apache License, Version
-            2.0
-            (the "License"); you may not use this file
-            except in compliance with
-            the License. You may obtain a copy of the License at
-
-            http://www.apache.org/licenses/LICENSE-2.0
-
-            Unless required by applicable law or agreed to in
-            writing, software
-            distributed under the License is distributed on an "AS IS" BASIS,
-            WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-            implied.
-            See the License
-            for the specific language governing permissions and
-            limitations under the License.
-        </xs:documentation>
-        <xs:appinfo>
-            <jaxb:schemaBindings>
-                <jaxb:package name="org.apache.falcon.entity.v0.feed"/>
-            </jaxb:schemaBindings>
-        </xs:appinfo>
-    </xs:annotation>
-
-    <xs:element name="feed" type="feed">
-    </xs:element>
-    <xs:complexType name="feed">
-        <xs:annotation>
-            <xs:documentation>
-                name: A feed should have a unique name and this name is referenced
-                by processes as input or output feed.
-                tags: a feed specifies an optional list of comma separated tags
-                which is used for classification of data sets.
-                groups: a feed specifies a list of comma separated groups,
-                a group is a logical grouping of feeds and a group is said to be
-                available if all the feeds belonging to a group are available.
-                The frequency of all
-                the feeds which belong to the same group
-                must be the same.
-                availabilityFlag: specifies the name of a file which when
-                present/created
-                in a feeds data directory, the feed is
-                termed as available. ex: _SUCCESS, if
-                this element is ignored then Falcon would consider the presence of feed's
-                data directory as feed availability.
-                A feed has a
-                frequency and a periodicity which specifies the frequency by which
-                this feed is generated. ex: it can be generated every hour, every 5 minutes, daily, weekly etc.
-                valid frequency type for a feed are minutes, hours, days, months.
-                sla: A feed can have SLA and each SLA has two properties - slaLow and slaHigh. Both slaLow and slaHigh
-                are written using expressions like frequency. slaLow is intended to serve for alerting for feeds which
-                are in danger of missing their availability SLAs. slaHigh is intended to serve for reporting the feeds
-                which missed their SLAs. SLAs are relative to feed instance time.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="KEY_VALUE_PAIR" name="tags" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>
-                        tags: a feed specifies an optional list of comma separated tags,
-                        Key Value Pairs, separated by comma,
-                        which is used for classification of processes.
-                        Example: consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="partitions" name="partitions" minOccurs="0"/>
-            <xs:element type="group-type" name="groups" minOccurs="0"/>
-            <xs:element type="xs:string" name="availabilityFlag" minOccurs="0"/>
-            <xs:element type="frequency-type" name="frequency"/>
-            <xs:element type="sla" name="sla" minOccurs="0"/>
-            <xs:element name="timezone" minOccurs="0" default="UTC">
-                <xs:simpleType>
-                    <xs:annotation>
-                        <xs:appinfo>
-                            <jaxb:javaType name="java.util.TimeZone" parseMethod="java.util.TimeZone.getTimeZone"
-                                           printMethod="org.apache.falcon.entity.v0.SchemaHelper.getTimeZoneId"/>
-                        </xs:appinfo>
-                    </xs:annotation>
-                    <xs:restriction base="xs:string"/>
-                </xs:simpleType>
-            </xs:element>
-            <xs:element type="late-arrival" name="late-arrival" minOccurs="0"/>
-            <xs:element type="clusters" name="clusters"/>
-            <xs:choice minOccurs="1" maxOccurs="1">
-                <xs:element type="locations" name="locations"/>
-                <xs:element type="catalog-table" name="table"/>
-            </xs:choice>
-            <xs:element type="notification" name="notification" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Notification will help to notify the users about the finished status of Falcon
-                        Instance. Currently Email type notification is supported and users must specify the receiver's
-                        email address.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="ACL" name="ACL"/>
-            <xs:element type="schema" name="schema"/>
-            <xs:element type="properties" name="properties" minOccurs="0"/>
-            <xs:element type="lifecycle" name="lifecycle" minOccurs="0" />
-        </xs:sequence>
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="description"/>
-    </xs:complexType>
-    <xs:complexType name="cluster">
-        <xs:annotation>
-            <xs:documentation>
-                A feed references a cluster by its name; before submitting a feed all the
-                referenced cluster should be submitted to Falcon.
-                type: specifies whether the
-                referenced cluster should be treated as a
-                source or target for a feed.
-                Validity of a feed on cluster specifies duration for which this feed is
-                valid on this cluster.
-                Retention specifies how long the feed is retained on this cluster and the
-                action to be taken on the feed after the expiry of retention period.
-                The retention limit is
-                specified by expression frequency(times), ex: if
-                feed should be retained for at least 6 hours then retention's limit="hours(6)".
-                The field partitionExp contains
-                partition tags. Number of partition tags has to be equal to number of partitions specified in feed
-                schema.
-                A partition tag can be a wildcard(*), a static string or
-                an expression. At least one of the strings has to be an expression.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="validity" name="validity"/>
-            <xs:element type="retention" name="retention"/>
-            <xs:element type="sla" name="sla" minOccurs="0" maxOccurs="1"/>
-            <xs:element type="import" name="import" minOccurs="0" maxOccurs="1"/>
-            <xs:element type="export" name="export" minOccurs="0" maxOccurs="1"/>
-            <xs:choice minOccurs="0" maxOccurs="1">
-                <xs:element type="locations" name="locations" minOccurs="0"/>
-                <xs:element type="catalog-table" name="table"/>
-            </xs:choice>
-            <xs:element type="lifecycle" name="lifecycle" minOccurs="0" />
-        </xs:sequence>
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-        <xs:attribute type="cluster-type" name="type" use="optional"/>
-        <xs:attribute type="xs:string" name="partition" use="optional"/>
-        <xs:attribute type="frequency-type" name="delay" use="optional" />
-    </xs:complexType>
-    <xs:complexType name="partitions">
-        <xs:annotation>
-            <xs:documentation>
-                A list of partitions, which are the logical partitions of a feed,
-                maintained in the HCatalog registry.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="partition" name="partition" maxOccurs="unbounded" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="schema">
-        <xs:annotation>
-            <xs:documentation>A schema specifies the location of a schema file
-                for a feed and the provider of schema like protobuf, thrift etc.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="location" use="required"/>
-        <xs:attribute type="xs:string" name="provider" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="properties">
-        <xs:annotation>
-            <xs:documentation>
-                A list of name-value pair of property.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="property" name="property" maxOccurs="unbounded" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="validity">
-        <xs:annotation>
-            <xs:documentation>
-                A validity has a start, which is the validity start date, and an end,
-                the validity end date. ex: start="2011-11-01T00:00Z" in TZ format.
-                The timezone can be UTC or GMT.
-                Processes referring to this feed would consider the validity period for
-                validation.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="date-time-type" name="start" use="required"/>
-        <xs:attribute type="date-time-type" name="end" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="sla">
-        <xs:annotation>
-            <xs:documentation>
-                sla has two properties - slaLow and slaHigh. Both slaLow and slaHigh
-                are written using expressions like frequency. slaLow is intended for alerting on feeds which
-                are in danger of missing their availability SLAs. slaHigh is intended for reporting feeds
-                which missed their SLAs. SLAs are relative to feed instance time.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="frequency-type" name="slaLow" use="required"/>
-        <xs:attribute type="frequency-type" name="slaHigh" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="locations">
-        <xs:annotation>
-            <xs:documentation>
-                A list of locations on the file system.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:choice maxOccurs="unbounded" minOccurs="0">
-            <xs:element type="location" name="location"/>
-        </xs:choice>
-    </xs:complexType>
-    <xs:complexType name="late-arrival">
-        <xs:annotation>
-            <xs:documentation>
-                late-arrival specifies the cut-off period till which the feed is
-                expected to arrive late and should be honored by processes referring
-                to it as an input feed, by rerunning the instances in case
-                the data arrives late within the cut-off period.
-                The cut-off period is specified by the expression
-                frequency(times), ex: if the feed
-                can arrive late up to 8 hours then late-arrival's cut-off="hours(8)"
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="frequency-type" name="cut-off" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="property">
-        <xs:annotation>
-            <xs:documentation>
-                A key-value pair, which are propagated to the
-                workflow engine.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="value" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="clusters">
-        <xs:annotation>
-            <xs:documentation>
-                A list of clusters.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="cluster" name="cluster" maxOccurs="unbounded" minOccurs="1">
-            </xs:element>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="retention">
-        <xs:attribute type="retention-type" name="type" default="instance"/>
-        <xs:attribute type="frequency-type" name="limit" use="required"/>
-        <xs:attribute type="action-type" name="action" use="required"/>
-    </xs:complexType>
-    <xs:simpleType name="retention-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="instance"/>
-            <!-- <xs:enumeration value="age" /> -->
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="location">
-        <xs:annotation>
-            <xs:documentation>
-                location specifies the type of location like data, meta, stats
-                and the corresponding paths for them.
-                A feed should at least define the location for type
-                data, which
-                specifies the HDFS path pattern where the feed is generated
-                periodically. ex: type="data" path="/projects/TrafficHourly/${YEAR}-${MONTH}-${DAY}/traffic"
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="location-type" name="type" use="required"/>
-        <xs:attribute type="non-empty-string" name="path" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="partition">
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="notification">
-        <xs:annotation>
-            <xs:documentation>
-                Notification specifies the "type" of notification to be used to send notifications.
-                Currently an email based notification type is supported, and users can specify comma
-                separated email addresses with the "to" property.
-                e.g:     type="email" to="falcon@localhost,hive@localhost"
-                The "limit" property in notification helps to set the frequency of email notifications
-                in case of Falcon instance failure. In case of a feed entity only limit="attempt" is supported
-                as there is no retry element.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute name="type" use="required">
-            <xs:simpleType>
-                <xs:restriction base="xs:string">
-                    <xs:enumeration value="email"/>
-                </xs:restriction>
-            </xs:simpleType>
-        </xs:attribute>
-        <xs:attribute name="level" use="optional">
-            <xs:simpleType>
-                <xs:restriction base="xs:string">
-                    <xs:enumeration value="attempt"/>
-                    <xs:enumeration value="instance"/>
-                </xs:restriction>
-            </xs:simpleType>
-        </xs:attribute>
-        <xs:attribute type="xs:string" name="to" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="ACL">
-        <xs:annotation>
-            <xs:documentation>
-                Access control list for this feed.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="owner"/>
-        <xs:attribute type="xs:string" name="group"/>
-        <xs:attribute type="xs:string" name="permission" default="*"/>
-    </xs:complexType>
-    <xs:simpleType name="action-type">
-        <xs:restriction base="xs:string">
-            <xs:annotation>
-                <xs:documentation>
-                    action type specifies the action that should be taken on a feed
-                    when the retention period of a feed expires on a cluster;
-                    the only valid action is delete.
-                </xs:documentation>
-            </xs:annotation>
-            <xs:enumeration value="delete"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="lifecycle">
-        <xs:annotation>
-            <xs:documentation>
-                Lifecycle of the feed consists of various stages. For example typical stages of a feed are import,
-                replication, archival, retention and export. All these stages together are called lifecycle of a feed.
-            </xs:documentation>
-        </xs:annotation>
-
-        <xs:all>
-            <xs:element type="retention-stage" name="retention-stage" minOccurs="0"></xs:element>
-        </xs:all>
-
-    </xs:complexType>
-
-    <xs:simpleType name="cluster-type">
-        <xs:annotation>
-                The clusters on a feed can be defined as either source or target;
-                a feed should have at least one source cluster defined.
-                The target clusters are used for replication of the feed.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="source"/>
-            <xs:enumeration value="target"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="location-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="data"/>
-            <xs:enumeration value="stats"/>
-            <xs:enumeration value="meta"/>
-            <xs:enumeration value="tmp"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="IDENTIFIER">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(([a-zA-Z]([\-a-zA-Z0-9])*){1,39})"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="frequency-type">
-        <xs:annotation>
-            <xs:appinfo>
-                <jaxb:javaType name="org.apache.falcon.entity.v0.Frequency"
-                               parseMethod="org.apache.falcon.entity.v0.Frequency.fromString"
-                               printMethod="org.apache.falcon.entity.v0.Frequency.toString"/>
-            </xs:appinfo>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(minutes|hours|days|months)\([1-9]\d*\)"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="date-time-type">
-        <xs:annotation>
-            <xs:appinfo>
-                <jaxb:javaType name="java.util.Date" parseMethod="org.apache.falcon.entity.v0.SchemaHelper.parseDateUTC"
-                               printMethod="org.apache.falcon.entity.v0.SchemaHelper.formatDateUTC"/>
-            </xs:appinfo>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:pattern
-                    value="((19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])T([0-1][0-9]|2[0-3]):([0-5][0-9]))Z"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="group-type">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(\w+(,\w+)*)"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="KEY_VALUE_PAIR">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="([\w_]+=[^,]+)?([,]?[ ]*[\w_]+=[^,]+)*"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="catalog-table">
-        <xs:annotation>
-            <xs:documentation>
-                catalog specifies the uri of a Hive table along with the partition spec.
-                uri="catalog:$database:$table#(partition-key=partition-value);+"
-                Example: catalog:logs-db:clicks#ds=${YEAR}-${MONTH}-${DAY}
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="uri" use="required"/>
-    </xs:complexType>
-    <xs:simpleType name="non-empty-string">
-        <xs:restriction base="xs:string">
-            <xs:minLength value="1"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="import">
-       <xs:sequence>
-            <xs:element type="datasource" name="source"/>
-            <xs:element type="arguments" name="arguments" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="export">
-        <xs:sequence>
-            <xs:element type="datasource" name="target"/>
-            <xs:element type="arguments" name="arguments" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="datasource">
-        <xs:annotation>
-            <xs:documentation>
-                Specifies the source entity name from which data can be imported or exported.
-                This can be Database or other data source types in the future. The connection
-                and authentication details of the data source are defined in the Datasource
-                entity.
-                Table name specifies the table to import or export depending on the action type.
-                Extract type specifies an extraction method (full or incremental).
-                DeltaColumn specifies the column name on the source database table
-                to identify the new data since the last extraction.
-                Merge type specifies how the data will be organized on Hadoop.
-                The supported types are snapshot (as in a particular time) or append
-                (as in timeseries partitions).
-                Load type specifies if new rows are inserted (load type=allowinsert) into
-                the database table or updated (load type=updateonly). If the updateonly load type
-                is specified, then update columns need to be passed via the arguments.
-                Fields can be specified as includes or excludes of fields. If an exclusion list
-                is specified, all columns except the ones specified will be imported or exported.
-                If an inclusion list is specified, only the specified columns are exported or imported.
-            </xs:documentation>
-        </xs:annotation>
-       <xs:sequence>
-           <xs:choice minOccurs="1" maxOccurs="1">
-               <xs:element type="extract" name="extract"/>
-               <xs:element type="load" name="load"/>
-           </xs:choice>
-           <xs:element type="fields-type" name="fields" minOccurs="0" maxOccurs="1"/>
-        </xs:sequence>
-        <xs:attribute type="non-empty-string" name="name" use="required"/>
-        <xs:attribute type="non-empty-string" name="tableName" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="extract">
-        <xs:sequence>
-            <xs:element type="xs:string" name="deltacolumn" minOccurs="0" maxOccurs="1"/>
-            <xs:element type="merge-type" name="mergepolicy" minOccurs="1" maxOccurs="1"/>
-        </xs:sequence>
-        <xs:attribute type="extract-method" name="type" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="load">
-        <xs:attribute type="load-method" name="type" use="required"/>
-    </xs:complexType>
-    <xs:simpleType name="extract-method">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="full"/>
-            <xs:enumeration value="incremental"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="load-method">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="updateonly"/>
-            <xs:enumeration value="allowinsert"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:simpleType name="merge-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="snapshot"/>
-            <xs:enumeration value="append"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="fields-type">
-        <xs:annotation>
-            <xs:documentation>
-                Specifies either an include or exclude fields list. If include field list is specified, only
-                the specified fields will be imported. If exclude field list is specified, all fields except
-                the ones specified will be imported from datasource to HDFS.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:choice minOccurs="1" maxOccurs="1">
-            <xs:element type="field-include-exclude" name="includes"/>
-            <xs:element type="field-include-exclude" name="excludes"/>
-        </xs:choice>
-    </xs:complexType>
-    <xs:complexType name="field-include-exclude">
-        <xs:sequence>
-            <xs:element type="xs:string" name="field" maxOccurs="unbounded" minOccurs="1"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="arguments">
-        <xs:annotation>
-            <xs:documentation>
-                A list of name-value pair of extra arguments to be passed to the concrete implementation.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="argument" name="argument" maxOccurs="unbounded" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="argument">
-        <xs:annotation>
-            <xs:documentation>
-                A key-value pair, which are used while invoking
-                ingestion engines.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="value" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="retention-stage">
-        <xs:annotation>
-            <xs:documentation>
-                Retention stage is the new way to define retention for a feed using the feed lifecycle feature. Retention
-                has a configurable policy which does the validation and the real execution through the workflow engine.
-                This method of specifying retention gives more control, such as using a different queue name, priority
-                and execution-order for retention than for other feed lifecycle stages like replication.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:all>
-            <xs:element type="non-empty-string" name="policy" minOccurs="0" maxOccurs="1"></xs:element>
-            <xs:element type="frequency-type" name="frequency" minOccurs="0" maxOccurs="1"></xs:element>
-            <xs:element type="xs:string" name="queue" minOccurs="0" maxOccurs="1"></xs:element>
-            <xs:element type="xs:string" name="priority" minOccurs="0" maxOccurs="1"></xs:element>
-            <xs:element type="properties" name="properties" minOccurs="0" maxOccurs="1"></xs:element>
-        </xs:all>
-    </xs:complexType>
-</xs:schema>
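
For reference, a fragment of a feed entity that this schema accepts could look as follows (a minimal sketch; the cluster name, validity dates and data path are illustrative only):

    <!-- illustrative fragment of a feed entity; names, dates and paths are hypothetical -->
    <late-arrival cut-off="hours(8)"/>
    <clusters>
        <cluster name="primary-cluster" type="source">
            <validity start="2016-01-01T00:00Z" end="2099-12-31T00:00Z"/>
            <retention limit="hours(6)" action="delete"/>
            <sla slaLow="hours(2)" slaHigh="hours(4)"/>
        </cluster>
    </clusters>
    <locations>
        <location type="data" path="/projects/TrafficHourly/${YEAR}-${MONTH}-${DAY}/traffic"/>
    </locations>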

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/resources/jaxb-binding.xjb
----------------------------------------------------------------------
diff --git a/client/src/main/resources/jaxb-binding.xjb b/client/src/main/resources/jaxb-binding.xjb
deleted file mode 100644
index 978145f..0000000
--- a/client/src/main/resources/jaxb-binding.xjb
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<jaxb:bindings
-        version="2.1"
-        jaxb:extensionBindingPrefixes="xjc jaxb xs inheritance annox"
-        xmlns:jaxb="http://java.sun.com/xml/ns/jaxb"
-        xmlns:xjc="http://java.sun.com/xml/ns/jaxb/xjc"
-        xmlns:inheritance="http://jaxb2-commons.dev.java.net/basic/inheritance"
-        xmlns:xs="http://www.w3.org/2001/XMLSchema">
-
-    <jaxb:bindings schemaLocation="cluster-0.1.xsd" node="//xs:complexType[@name='cluster']">
-        <inheritance:extends>org.apache.falcon.entity.v0.Entity</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="cluster-0.1.xsd" node="//xs:complexType[@name='ACL']">
-        <inheritance:extends>org.apache.falcon.entity.v0.AccessControlList</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="feed-0.1.xsd" node="//xs:complexType[@name='feed']">
-        <inheritance:extends>org.apache.falcon.entity.v0.Entity</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="feed-0.1.xsd" node="//xs:complexType[@name='ACL']">
-        <inheritance:extends>org.apache.falcon.entity.v0.AccessControlList</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="feed-0.1.xsd" node="//xs:complexType[@name='notification']">
-        <inheritance:extends>org.apache.falcon.entity.v0.EntityNotification</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="process-0.1.xsd" node="//xs:complexType[@name='process']">
-        <inheritance:extends>org.apache.falcon.entity.v0.Entity</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="process-0.1.xsd" node="//xs:complexType[@name='ACL']">
-        <inheritance:extends>org.apache.falcon.entity.v0.AccessControlList</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="process-0.1.xsd" node="//xs:complexType[@name='notification']">
-        <inheritance:extends>org.apache.falcon.entity.v0.EntityNotification</inheritance:extends>
-    </jaxb:bindings>
-
-
-    <jaxb:bindings schemaLocation="datasource-0.1.xsd" node="//xs:complexType[@name='datasource']">
-         <inheritance:extends>org.apache.falcon.entity.v0.Entity</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:bindings schemaLocation="datasource-0.1.xsd" node="//xs:complexType[@name='ACL']">
-        <inheritance:extends>org.apache.falcon.entity.v0.AccessControlList</inheritance:extends>
-    </jaxb:bindings>
-
-    <jaxb:globalBindings>
-        <xjc:simple/>
-    </jaxb:globalBindings>
-
-</jaxb:bindings>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/resources/mysql_database.xml
----------------------------------------------------------------------
diff --git a/client/src/main/resources/mysql_database.xml b/client/src/main/resources/mysql_database.xml
deleted file mode 100644
index 5f88ba4..0000000
--- a/client/src/main/resources/mysql_database.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<database colo="west-coast" description="MySQL database on west coast" type="mysql" name="mysql-db" xmlns="uri:falcon:database:0.1">
-    <tags>owner=foobar@ambari.apache.org, consumer=phoe@ambari.apache.org</tags>
-    <interfaces>
-
-        <!-- ***** read interface ***** -->
-        <interface type="readonly" endpoint="jdbc:mysql://c6402/test">
-            <credential type="password-file">
-                <userName>sqoop_user</userName>
-                <passwordFile>/user/ambari-qa/password-store/password_read_user</passwordFile>
-            </credential>
-        </interface>
-
-        <!-- ***** write interface ***** -->
-        <interface type="write"  endpoint="jdbc:mysql://c6402/test">
-            <credential type="password-file">
-                <userName>sqoop2_user</userName>
-                <passwordFile>/user/ambari-qa/password-store/password_write_user</passwordFile>
-            </credential>
-        </interface>
-
-        <!-- ***** default credential ***** -->
-        <credential type="password-file">
-            <userName>sqoop2_user</userName>
-            <passwordFile>/user/ambari-qa/password-store/password_write_user</passwordFile>
-        </credential>
-
-    </interfaces>
-</database>
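
To sketch how a feed could reference a datasource such as the one above, an import section permitted by the import, datasource, extract and fields types in feed-0.1.xsd (placed inside a feed's cluster element) might look like the following; the table, column and argument names are illustrative assumptions:

    <import>
        <!-- table, columns and the sqoop-style argument below are illustrative -->
        <source name="mysql-db" tableName="clicks">
            <extract type="full">
                <mergepolicy>snapshot</mergepolicy>
            </extract>
            <fields>
                <includes>
                    <field>id</field>
                    <field>ts</field>
                </includes>
            </fields>
        </source>
        <arguments>
            <argument name="--num-mappers" value="2"/>
        </arguments>
    </import>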

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/resources/process-0.1.xsd
----------------------------------------------------------------------
diff --git a/client/src/main/resources/process-0.1.xsd b/client/src/main/resources/process-0.1.xsd
deleted file mode 100644
index 9d7898f..0000000
--- a/client/src/main/resources/process-0.1.xsd
+++ /dev/null
@@ -1,443 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema"
-           targetNamespace="uri:falcon:process:0.1" xmlns="uri:falcon:process:0.1"
-           xmlns:jaxb="http://java.sun.com/xml/ns/jaxb" jaxb:version="2.1">
-    <xs:annotation>
-        <xs:documentation>
-            Licensed to the Apache Software Foundation (ASF) under one or more
-            contributor license agreements. See the NOTICE file distributed with
-            this work for additional information regarding copyright ownership.
-            The ASF licenses this file to You under the Apache License, Version
-            2.0 (the "License"); you may not use this file except in compliance with
-            the License. You may obtain a copy of the License at
-
-            http://www.apache.org/licenses/LICENSE-2.0
-
-            Unless required by applicable law or agreed to in writing, software
-            distributed under the License is distributed on an "AS IS" BASIS,
-            WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-            See the License for the specific language governing permissions and
-            limitations under the License.
-        </xs:documentation>
-        <xs:appinfo>
-            <jaxb:schemaBindings>
-                <jaxb:package name="org.apache.falcon.entity.v0.process"/>
-            </jaxb:schemaBindings>
-        </xs:appinfo>
-    </xs:annotation>
-
-    <xs:element name="process" type="process">
-        <xs:annotation>
-            <xs:documentation>
-                A process defines configuration for the workflow job like
-                frequency of the workflow, inputs and outputs for the workflow, how to
-                handle workflow failures, how to handle data that comes late and so on.
-            </xs:documentation>
-        </xs:annotation>
-    </xs:element>
-    <xs:complexType name="process">
-        <xs:sequence>
-            <xs:element type="KEY_VALUE_PAIR" name="tags" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>
-                        tags: a process specifies an optional list of comma separated tags
-                        (key-value pairs), which are used for classification of processes.
-                        Example: consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="PIPELINE_LIST" name="pipelines" minOccurs="0" maxOccurs="1">
-                <xs:annotation>
-                    <xs:documentation>
-                        pipelines: a process specifies an optional list of comma separated pipelines,
-                        which is used for classification of processes.
-                        Example: dataReplicationPipeline, clickStreamPipeline
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="clusters" name="clusters">
-                <xs:annotation>
-                    <xs:documentation>Defines the clusters where the workflow should run
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element name="parallel">
-                <xs:annotation>
-                    <xs:documentation>Defines how many workflow instances can run concurrently
-                    </xs:documentation>
-                </xs:annotation>
-                <xs:simpleType>
-                    <xs:restriction base="xs:unsignedShort">
-                        <xs:minInclusive value="1"/>
-                        <xs:maxInclusive value="12"/>
-                    </xs:restriction>
-                </xs:simpleType>
-            </xs:element>
-            <xs:element type="execution-type" name="order">
-                <xs:annotation>
-                    <xs:documentation>Defines the order in which ready workflow instances should run
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="frequency-type" name="timeout" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Defines time after which instances will no longer be executed
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="frequency-type" name="frequency">
-                <xs:annotation>
-                    <xs:documentation>Defines workflow frequency
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="sla" name="sla" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>
-                        Defines SLA(Service Level Agreement) for process.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element name="timezone" minOccurs="0" default="UTC">
-                <xs:simpleType>
-                    <xs:annotation>
-                        <xs:appinfo>
-                            <jaxb:javaType name="java.util.TimeZone" parseMethod="java.util.TimeZone.getTimeZone"
-                                           printMethod="org.apache.falcon.entity.v0.SchemaHelper.getTimeZoneId"/>
-                        </xs:appinfo>
-                    </xs:annotation>
-                    <xs:restriction base="xs:string"/>
-                </xs:simpleType>
-            </xs:element>
-            <xs:element type="inputs" name="inputs" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Defines inputs for the workflow. The workflow will run only when the scheduled
-                        time is up and all the inputs are available
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="outputs" name="outputs" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Defines outputs of the workflow
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="properties" name="properties" minOccurs="0">
-            </xs:element>
-            <xs:element type="workflow" name="workflow">
-                <xs:annotation>
-                    <xs:documentation>Defines the workflow that should run. The workflow should be defined with respect
-                        to the workflow specification of the workflow engine.
-                        Only the oozie workflow engine is supported as of now. The workflow path is the path on HDFS which
-                        contains the workflow xml.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="retry" name="retry" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Retry defines how to handle workflow failures. The policy type (periodic, exponential
-                        backoff or final) along with the delay defines how frequently the workflow should be re-tried. The number of
-                        attempts defines how many times to re-try the failures.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="late-process" name="late-process" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Late process defines how late data should be handled. The late policy (backoff,
-                        exponential backoff or final) along with the delay defines how
-                        frequently Falcon should check for late data. The late data handling can be customized for each
-                        input separately.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="notification" name="notification" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>Notification notifies users about the final status of a Falcon
-                        instance. Currently only email notification is supported and users must specify the
-                        receiver's email address.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="ACL" name="ACL" minOccurs="0"/>
-        </xs:sequence>
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-    </xs:complexType>
-
-    <xs:simpleType name="IDENTIFIER">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(([a-zA-Z]([\-a-zA-Z0-9])*){1,39})"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:complexType name="clusters">
-        <xs:annotation>
-            <xs:documentation>
-                A list of clusters.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="cluster" name="cluster" maxOccurs="unbounded" minOccurs="1">
-            </xs:element>
-        </xs:sequence>
-    </xs:complexType>
-
-    <xs:complexType name="cluster">
-        <xs:annotation>
-            <xs:documentation>
-                Defines the cluster where the workflow should run. In addition, it also defines the validity of the
-                workflow on this cluster
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="validity" name="validity"/>
-        </xs:sequence>
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-    </xs:complexType>
-
-    <xs:complexType name="validity">
-        <xs:annotation>
-            <xs:documentation>
-                Defines the validity of the workflow as start and end time
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="date-time-type" name="start" use="required"/>
-        <xs:attribute type="date-time-type" name="end" use="required"/>
-    </xs:complexType>
-
-    <xs:simpleType name="date-time-type">
-        <xs:annotation>
-            <xs:appinfo>
-                <jaxb:javaType name="java.util.Date" parseMethod="org.apache.falcon.entity.v0.SchemaHelper.parseDateUTC"
-                               printMethod="org.apache.falcon.entity.v0.SchemaHelper.formatDateUTC"/>
-            </xs:appinfo>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:pattern
-                    value="((1|2)\d\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])T([0-1][0-9]|2[0-3]):([0-5][0-9]))Z"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:simpleType name="execution-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="FIFO"/>
-            <xs:enumeration value="LIFO"/>
-            <xs:enumeration value="LAST_ONLY"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:simpleType name="frequency-type">
-        <xs:annotation>
-            <xs:appinfo>
-                <jaxb:javaType name="org.apache.falcon.entity.v0.Frequency"
-                               parseMethod="org.apache.falcon.entity.v0.Frequency.fromString"
-                               printMethod="org.apache.falcon.entity.v0.Frequency.toString"/>
-            </xs:appinfo>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(minutes|hours|days|months)\([1-9]\d*\)"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:complexType name="sla">
-        <xs:annotation>
-            <xs:documentation>
-                sla has 2 optional attributes - shouldStartIn and shouldEndIn. Both attributes
-                are written using expressions like frequency. shouldStartIn is the time by which the process should have
-                started. shouldEndIn is the time by which the process should have finished.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="frequency-type" name="shouldStartIn"/>
-        <xs:attribute type="frequency-type" name="shouldEndIn" />
-    </xs:complexType>
-
-    <xs:complexType name="inputs">
-        <xs:sequence>
-            <xs:element type="input" name="input" maxOccurs="unbounded" minOccurs="1">
-                <xs:annotation>
-                    <xs:documentation>Defines input for the workflow. Each input maps to a feed. Input path and
-                        frequency are picked from feed definition.
-                        The input specifies the
-                        start and end instance for the workflow. Falcon creates a property with input name which
-                        contains paths of all input
-                        instances between start and end. This
-                        property will be available for the workflow to read inputs.
-                        Input can also optionally specify the specific partition of feed that the workflow needs.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-        </xs:sequence>
-    </xs:complexType>
-
-    <xs:complexType name="input">
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-        <xs:attribute type="IDENTIFIER" name="feed" use="required"/>
-        <xs:attribute type="xs:string" name="start" use="required"/>
-        <xs:attribute type="xs:string" name="end" use="required"/>
-        <xs:attribute type="xs:string" name="partition" use="optional"/>
-        <xs:attribute type="xs:boolean" name="optional" use="optional" default="false"/>
-    </xs:complexType>
-
-    <xs:complexType name="outputs">
-        <xs:sequence>
-            <xs:element type="output" name="output" maxOccurs="unbounded" minOccurs="1">
-                <xs:annotation>
-                    <xs:documentation>Each output maps to a feed. The Output path and frequency are picked from the
-                        corresponding feed definition.
-                        The output also specifies the
-                        instance that is created in terms of EL expression.
-                        For each output, Falcon creates a property with the output name which can be used in workflows
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-        </xs:sequence>
-    </xs:complexType>
-
-    <xs:complexType name="output">
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-        <xs:attribute type="IDENTIFIER" name="feed" use="required"/>
-        <xs:attribute type="xs:string" name="instance" use="required"/>
-    </xs:complexType>
-
-    <xs:complexType name="workflow">
-        <xs:attribute type="xs:string" name="name" use="optional"/>
-        <xs:attribute type="xs:string" name="version" use="optional" default="1.0"/>
-        <xs:attribute type="engine-type" name="engine" use="optional" default="oozie"/>
-        <xs:attribute type="xs:string" name="path" use="required"/>
-        <xs:attribute type="xs:string" name="lib" use="optional"/>
-    </xs:complexType>
-
-    <xs:simpleType name="engine-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="oozie"/>
-            <xs:enumeration value="pig"/>
-            <xs:enumeration value="hive"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:complexType name="retry">
-        <xs:attribute type="policy-type" name="policy" use="required"/>
-        <xs:attribute type="frequency-type" name="delay" use="required"/>
-        <xs:attribute name="attempts" use="required">
-            <xs:simpleType>
-                <xs:restriction base="xs:unsignedShort">
-                    <xs:minInclusive value="1"/>
-                </xs:restriction>
-            </xs:simpleType>
-        </xs:attribute>
-        <xs:attribute type="xs:boolean" name="onTimeout" use="optional" default="false"/>
-    </xs:complexType>
-
-    <xs:simpleType name="policy-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="periodic"/>
-            <xs:enumeration value="exp-backoff"/>
-            <xs:enumeration value="final"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:complexType name="late-process">
-        <xs:sequence>
-            <xs:element type="late-input" name="late-input" maxOccurs="unbounded" minOccurs="1">
-                <xs:annotation>
-                    <xs:documentation>
-                        For each input, defines the workflow that should be run when late data is detected
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-        </xs:sequence>
-        <xs:attribute type="policy-type" name="policy" use="required"/>
-        <xs:attribute type="frequency-type" name="delay" use="required"/>
-    </xs:complexType>
-
-    <xs:complexType name="late-input">
-        <xs:attribute type="IDENTIFIER" name="input" use="required"/>
-        <xs:attribute type="xs:string" name="workflow-path" use="required"/>
-    </xs:complexType>
-
-    <xs:complexType name="properties">
-        <xs:sequence>
-            <xs:element type="property" name="property" maxOccurs="unbounded" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="property">
-        <xs:attribute type="xs:string" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="value" use="required"/>
-    </xs:complexType>
-
-    <xs:simpleType name="KEY_VALUE_PAIR">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="([\w_]+=[^,]+)?([,]?[ ]*[\w_]+=[^,]+)*"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="PIPELINE_LIST">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="([\w+_]+)([,]?[ ]*([\w+_]+))*"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:complexType name="ACL">
-        <xs:annotation>
-            <xs:documentation>
-                Access control list for this process.
-                owner is the Owner of this entity.
-                group is the one which has access to read - not used at this time.
-                permission is not enforced at this time
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="owner"/>
-        <xs:attribute type="xs:string" name="group"/>
-        <xs:attribute type="xs:string" name="permission" default="*"/>
-    </xs:complexType>
-
-    <xs:complexType name="notification">
-        <xs:annotation>
-            <xs:documentation>
-                Notification specifies the "type" of notification to be used to send notifications.
-                Currently an email based notification type is supported and users can specify comma
-                separated email addresses with the "to" property.
-                e.g:     type="email" to="falcon@localhost,hive@localhost"
-                The "limit" property in notification helps to set the frequency of email notifications
-                in case of Falcon instance failure.
-                If limit="attempt" is set, an email will be sent for every instance failure.
-                If limit="final" is set, a failure email will be sent only when all the attempts defined
-                with the retry element have been tried.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute name="type" use="required">
-            <xs:simpleType>
-                <xs:restriction base="xs:string">
-                    <xs:enumeration value="email"/>
-                </xs:restriction>
-            </xs:simpleType>
-        </xs:attribute>
-        <xs:attribute name="level" use="optional">
-            <xs:simpleType>
-                <xs:restriction base="xs:string">
-                    <xs:enumeration value="attempt"/>
-                    <xs:enumeration value="instance"/>
-                </xs:restriction>
-            </xs:simpleType>
-        </xs:attribute>
-        <xs:attribute type="xs:string" name="to" use="required"/>
-    </xs:complexType>
-</xs:schema>
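
For reference, a minimal sketch of a process definition this schema accepts, combining the clusters, parallel, order, frequency, workflow, retry and notification elements documented above; the entity, cluster and workflow path names are illustrative assumptions:

    <process name="sample-process" xmlns="uri:falcon:process:0.1">
        <!-- names, dates and the workflow path are illustrative -->
        <clusters>
            <cluster name="primary-cluster">
                <validity start="2016-01-01T00:00Z" end="2016-12-31T00:00Z"/>
            </cluster>
        </clusters>
        <parallel>1</parallel>
        <order>FIFO</order>
        <frequency>hours(1)</frequency>
        <timezone>UTC</timezone>
        <workflow engine="oozie" path="/apps/falcon/sample/workflow.xml"/>
        <retry policy="periodic" delay="minutes(10)" attempts="3"/>
        <notification type="email" to="falcon@localhost"/>
    </process>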

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/test/java/org/apache/falcon/cli/TestCLIParser.java
----------------------------------------------------------------------
diff --git a/client/src/test/java/org/apache/falcon/cli/TestCLIParser.java b/client/src/test/java/org/apache/falcon/cli/TestCLIParser.java
deleted file mode 100644
index 6ac3e9f..0000000
--- a/client/src/test/java/org/apache/falcon/cli/TestCLIParser.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-/**
- * Tests for the CLI command parser.
- */
-public class TestCLIParser {
-
-    @Test
-    public void testEmptyParser() throws Exception {
-        try {
-            CLIParser parser = new CLIParser("falcon", new String[]{});
-            CLIParser.Command c = parser.parse(new String[]{"a"});
-            Assert.fail();
-        } catch (ParseException ex) {
-            // nop
-        }
-    }
-
-    @Test
-    public void testCommandParser() throws Exception {
-        try {
-            CLIParser parser = new CLIParser("oozie", new String[]{});
-            parser.addCommand("a", "<A>", "AAAAA", new Options(), false);
-            CLIParser.Command c = parser.parse(new String[]{"a", "b"});
-            Assert.assertEquals("a", c.getName());
-            Assert.assertEquals("b", c.getCommandLine().getArgs()[0]);
-        } catch (ParseException ex) {
-            Assert.fail();
-        }
-    }
-
-    @Test
-    public void testCommandParserX() throws Exception {
-        Option opt = new Option("o", false, "O");
-        Options opts = new Options();
-        opts.addOption(opt);
-        CLIParser parser = new CLIParser("test", new String[]{});
-        parser.addCommand("c", "-X ",
-                "(everything after '-X' are pass-through parameters)", opts,
-                true);
-        CLIParser.Command c = parser.parse("c -o -X -o c".split(" "));
-        Assert.assertEquals("-X", c.getCommandLine().getArgList().get(0));
-        Assert.assertEquals(3, c.getCommandLine().getArgList().size());
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/test/java/org/apache/falcon/entity/v0/DateValidatorTest.java
----------------------------------------------------------------------
diff --git a/client/src/test/java/org/apache/falcon/entity/v0/DateValidatorTest.java b/client/src/test/java/org/apache/falcon/entity/v0/DateValidatorTest.java
deleted file mode 100644
index a1adb24..0000000
--- a/client/src/test/java/org/apache/falcon/entity/v0/DateValidatorTest.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.testng.Assert;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-/**
- * Tests for the date validator (dates of the form yyyy-MM-ddTHH:mmZ).
- */
-public class DateValidatorTest {
-
-    @DataProvider
-    public Object[][] validDateProvider() {
-        return new Object[][]{
-            new Object[]{"2011-11-01T00:00Z", }, new Object[]{"2020-01-01T00:00Z", },
-            new Object[]{"2010-01-31T00:59Z", }, new Object[]{"2020-01-31T00:00Z", },
-            new Object[]{"2008-02-29T01:00Z", }, new Object[]{"2008-02-29T00:00Z", },
-            new Object[]{"2009-02-28T01:01Z", }, new Object[]{"2009-02-28T00:00Z", },
-            new Object[]{"2010-03-31T23:00Z", }, new Object[]{"2010-03-31T00:00Z", },
-            new Object[]{"2010-04-30T23:59Z", }, new Object[]{"2010-04-30T00:00Z", },
-            new Object[]{"2010-05-31T23:23Z", }, new Object[]{"2010-05-31T00:00Z", },
-            new Object[]{"2010-06-30T00:00Z", }, new Object[]{"2010-06-30T00:00Z", },
-            new Object[]{"2010-07-31T00:00Z", }, new Object[]{"2010-07-31T00:00Z", },
-            new Object[]{"2010-08-31T00:00Z", }, new Object[]{"2010-08-31T00:00Z", },
-            new Object[]{"2010-09-30T00:00Z", }, new Object[]{"2010-09-30T00:00Z", },
-            new Object[]{"2010-10-31T00:00Z", }, new Object[]{"2010-10-31T00:00Z", },
-            new Object[]{"2010-11-30T00:00Z", }, new Object[]{"2010-11-30T00:00Z", },
-            new Object[]{"2010-12-31T00:00Z", }, new Object[]{"2010-12-31T00:00Z", },
-            new Object[]{"1999-01-30T01:00Z", }, new Object[]{"2999-12-31T00:00Z", },
-        };
-    }
-
-    @DataProvider
-    public Object[][] invalidDateProvider() {
-        return new Object[][]{
-            new Object[]{"2010-12-31T00:60Z", }, new Object[]{"2010-12-31T24:00Z", },
-            new Object[]{"2010-01-32T00:00Z", }, new Object[]{"2020-01-32T00:00Z", },
-            new Object[]{"2010-13-1T00:00Z", }, new Object[]{"1820-01-01T00:00Z", },
-            new Object[]{"2007-2-29T00:00Z", }, new Object[]{"2007-02-29T00:00Z", },
-            new Object[]{"2008-2-30T00:00Z", }, new Object[]{"2008-02-31T00:00Z", },
-            new Object[]{"2008-a-29T00:00Z", }, new Object[]{"2008-02aT00:00Z", },
-            new Object[]{"2008-2-333T00:00Z", }, new Object[]{"200a-02-29T00:00Z", },
-            new Object[]{"2010-4-31T00:00Z", }, new Object[]{"2010-04-31T00:00Z", },
-            new Object[]{"2010-6-31T00:00Z", }, new Object[]{"2010-06-31T00:00Z", },
-            new Object[]{"2010-9-31T00:00Z", }, new Object[]{"2010-09-31T00:00Z", },
-            new Object[]{"2010-11-31T00:00Z", }, new Object[]{"1999-04-31T01:00Z", },
-            new Object[]{null, }, new Object[]{"", }, new Object[]{"      ", },
-        };
-    }
-
-    @Test(dataProvider = "validDateProvider")
-    public void validDateTest(String date) {
-        boolean valid = DateValidator.validate(date);
-        System.out.println("Date is valid : " + date + " , " + valid);
-        Assert.assertEquals(valid, true);
-    }
-
-    @Test(dataProvider = "invalidDateProvider",
-            dependsOnMethods = "validDateTest")
-    public void invalidDateTest(String date) {
-        boolean valid = DateValidator.validate(date);
-        System.out.println("Date is valid : " + date + " , " + valid);
-        Assert.assertEquals(valid, false);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/test/java/org/apache/falcon/entity/v0/TestFrequency.java
----------------------------------------------------------------------
diff --git a/client/src/test/java/org/apache/falcon/entity/v0/TestFrequency.java b/client/src/test/java/org/apache/falcon/entity/v0/TestFrequency.java
deleted file mode 100644
index 56e442f..0000000
--- a/client/src/test/java/org/apache/falcon/entity/v0/TestFrequency.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-/**
- * Test frequency.
- */
-@Test
-public class TestFrequency {
-
-    public void testValidFrequency() {
-        String freqStr = "minutes(10)";
-        Frequency freq = Frequency.fromString(freqStr);
-        Assert.assertEquals(freq.getTimeUnit().name(), "minutes");
-        Assert.assertEquals(freq.getFrequency(), "10");
-        Assert.assertEquals(freq.getFrequencyAsInt(), 10);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/test/java/org/apache/falcon/resource/LineageGraphResultTest.java
----------------------------------------------------------------------
diff --git a/client/src/test/java/org/apache/falcon/resource/LineageGraphResultTest.java b/client/src/test/java/org/apache/falcon/resource/LineageGraphResultTest.java
deleted file mode 100644
index 058d097..0000000
--- a/client/src/test/java/org/apache/falcon/resource/LineageGraphResultTest.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Tests for LineageGraphResult.
- */
-@Test
-public class LineageGraphResultTest {
-
-    @Test
-    public void testEquals() {
-        Set<LineageGraphResult.Edge> set1 = new HashSet<>();
-        Set<LineageGraphResult.Edge> set2 = new HashSet<>();
-
-        List<String> from =  Arrays.asList(new String[]{"from1", "from2", "from3"});
-        List<String> to =  Arrays.asList(new String[]{"to1", "to2", "to3"});
-        List<String> label =  Arrays.asList(new String[]{"label1", "label2", "label3"});
-
-        for (int i = 0; i < 3; i++) {
-            set1.add(new LineageGraphResult.Edge(from.get(i), to.get(i), label.get(i)));
-            set2.add(new LineageGraphResult.Edge(from.get(i), to.get(i), label.get(i)));
-        }
-        Assert.assertEquals(set1, set2);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
deleted file mode 100644
index 2e01282..0000000
--- a/common/pom.xml
+++ /dev/null
@@ -1,222 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-common</artifactId>
-    <description>Apache Falcon Common Module</description>
-    <name>Apache Falcon Commons</name>
-    <packaging>jar</packaging>
-
-    <profiles>
-        <profile>
-            <id>hadoop-2</id>
-            <activation>
-                <activeByDefault>true</activeByDefault>
-            </activation>
-            <dependencies>
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-client</artifactId>
-                </dependency>
-                <dependency>
-                       <groupId>org.apache.hadoop</groupId>
-                       <artifactId>hadoop-hdfs</artifactId>
-                </dependency>
-                <dependency>
-                       <groupId>org.apache.hadoop</groupId>
-                       <artifactId>hadoop-hdfs</artifactId>
-                       <classifier>tests</classifier>
-                </dependency>
-                <dependency>
-                       <groupId>org.apache.hadoop</groupId>
-                       <artifactId>hadoop-common</artifactId>
-                       <classifier>tests</classifier>
-                </dependency>
-                <dependency>
-                       <groupId>org.apache.hadoop</groupId>
-                       <artifactId>hadoop-common</artifactId>
-                </dependency>
-                <dependency>
-                      <groupId>org.apache.hadoop</groupId>
-                      <artifactId>hadoop-aws</artifactId>
-                      <version>${hadoop.version}</version>
-                      <scope>provided</scope>
-                 </dependency>
-            </dependencies>
-        </profile>
-    </profiles>
-
-    <dependencies>
-        <dependency>
-            <groupId>commons-beanutils</groupId>
-            <artifactId>commons-beanutils</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.falcon</groupId>
-            <artifactId>falcon-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.falcon</groupId>
-            <artifactId>falcon-metrics</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-el</groupId>
-            <artifactId>commons-el</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.servlet.jsp</groupId>
-            <artifactId>jsp-api</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.xml.bind</groupId>
-            <artifactId>jaxb-api</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.codehaus.jettison</groupId>
-            <artifactId>jettison</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-jms</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-beans</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.falcon</groupId>
-            <artifactId>falcon-test-util</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-all</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.github.stephenc.findbugs</groupId>
-            <artifactId>findbugs-annotations</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.jms</groupId>
-            <artifactId>jms</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hive</groupId>
-            <artifactId>hive-common</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.hive.hcatalog</groupId>
-            <artifactId>hive-webhcat-java-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.googlecode.json-simple</groupId>
-            <artifactId>json-simple</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.tinkerpop.blueprints</groupId>
-            <artifactId>blueprints-core</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.thinkaurelius.titan</groupId>
-            <artifactId>titan-core-jre6</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.thinkaurelius.titan</groupId>
-            <artifactId>titan-berkeleyje-jre6</artifactId>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <configuration>
-                    <source>1.7</source>
-                    <target>1.7</target>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-jar-plugin</artifactId>
-                <version>2.4</version>
-                <configuration>
-                    <excludes>
-                        <exclude>**/log4j.xml</exclude>
-                    </excludes>
-                </configuration>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>test-jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/FalconException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/FalconException.java b/common/src/main/java/org/apache/falcon/FalconException.java
deleted file mode 100644
index 391700e..0000000
--- a/common/src/main/java/org/apache/falcon/FalconException.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon;
-
-/**
- * Common exception thrown from parsers to the Falcon API classes.
- */
-public class FalconException extends Exception {
-
-    /**
-     * @param e Exception
-     */
-    public FalconException(Throwable e) {
-        super(e);
-    }
-
-    public FalconException(String message, Throwable e) {
-        super(message, e);
-    }
-
-    /**
-     * @param message - custom exception message
-     */
-    public FalconException(String message) {
-        super(message);
-    }
-
-    /**
-     *
-     */
-    private static final long serialVersionUID = -1475818869309247014L;
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/FalconRuntimException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/FalconRuntimException.java b/common/src/main/java/org/apache/falcon/FalconRuntimException.java
deleted file mode 100644
index e465f09..0000000
--- a/common/src/main/java/org/apache/falcon/FalconRuntimException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon;
-
-/**
- * Runtime Exception class for Falcon application.
- */
-public class FalconRuntimException extends RuntimeException {
-
-    /**
-     *
-     */
-    private static final long serialVersionUID = 1105135528999858955L;
-
-    public FalconRuntimException(Exception e) {
-        super(e);
-    }
-
-    public FalconRuntimException(String message) {
-        super(message);
-    }
-
-    public FalconRuntimException(String message, Throwable e) {
-        super(message, e);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java b/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
deleted file mode 100644
index 41d50df..0000000
--- a/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.catalog;
-
-import org.apache.falcon.FalconException;
-import org.apache.hadoop.conf.Configuration;
-
-import java.util.List;
-
-/**
- * Interface definition for a catalog registry service
- * such as Hive or HCatalog.
- */
-public abstract class AbstractCatalogService {
-
-    /**
-     * This method checks if the catalog service is alive.
-     *
-     * @param conf conf
-     * @param catalogUrl url for the catalog service
-     * @return if the service was reachable
-     * @throws FalconException exception
-     */
-    public abstract boolean isAlive(Configuration conf, String catalogUrl) throws FalconException;
-
-    /**
-     * This method checks if the given table exists in the catalog.
-     *
-     * @param conf  conf
-     * @param catalogUrl url for the catalog service
-     * @param database database the table belongs to
-     * @param tableName tableName to check if it exists
-     * @return if the table exists
-     * @throws FalconException exception
-     */
-    public abstract boolean tableExists(Configuration conf, String catalogUrl,
-                                        String database, String tableName) throws FalconException;
-
-    /**
-     * Returns if the table is external or not. Executed in the workflow engine.
-     *
-     * @param conf conf object
-     * @param catalogUrl url for the catalog service
-     * @param database database the table belongs to
-     * @param tableName tableName to check if it exists
-     * @return true if external else false
-     * @throws FalconException
-     */
-    public abstract boolean isTableExternal(Configuration conf, String catalogUrl, String database,
-                                            String tableName) throws FalconException;
-
-    public abstract List<CatalogPartition> listPartitions(Configuration conf, String catalogUrl,
-                                                          String database, String tableName,
-                                                          List<String> values) throws FalconException;
-
-    /**
-     * List partitions by filter. Executed in the workflow engine.
-     *
-     * @param conf conf object
-     * @param catalogUrl url for the catalog service
-     * @param database database the table belongs to
-     * @param tableName tableName to check if it exists
-     * @param filter The filter string,
-     *    for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can
-     *    be done only on string partition keys.
-     * @return list of partitions
-     * @throws FalconException
-     */
-    public abstract List<CatalogPartition> listPartitionsByFilter(Configuration conf,
-                                                                  String catalogUrl,
-                                                                  String database,
-                                                                  String tableName, String filter)
-        throws FalconException;
-
-    /**
-     * Drops a given partition. Executed in the workflow engine.
-     *
-     * @param conf  conf object
-     * @param catalogUrl url for the catalog service
-     * @param database database the table belongs to
-     * @param tableName tableName to check if it exists
-     * @param partitionValues list of partition values
-     * @param deleteData should dropPartition also delete the corresponding data
-     * @return if the partition was dropped
-     * @throws FalconException
-     */
-    public abstract boolean dropPartition(Configuration conf, String catalogUrl,
-                                           String database, String tableName, List<String> partitionValues,
-                                           boolean deleteData) throws FalconException;
-
-    /**
-     * Drops the partitions. Executed in the workflow engine.
-     *
-     * @param conf  conf object
-     * @param catalogUrl url for the catalog service
-     * @param database database the table belongs to
-     * @param tableName tableName to check if it exists
-     * @param partitionValues list of partition values
-     * @param deleteData should dropPartitions also delete the corresponding data
-     * @throws FalconException
-     */
-    public abstract void dropPartitions(Configuration conf, String catalogUrl,
-                                        String database, String tableName,
-                                        List<String> partitionValues, boolean deleteData) throws FalconException;
-
-    /**
-     * Gets the partition. Executed in the workflow engine.
-     *
-     *
-     * @param conf  conf
-     * @param catalogUrl url for the catalog service
-     * @param database database the table belongs to
-     * @param tableName tableName to check if it exists
-     * @param partitionValues Values for partition columns.
-     * @return An instance of CatalogPartition.
-     * @throws FalconException
-     */
-    public abstract CatalogPartition getPartition(Configuration conf, String catalogUrl,
-                                                  String database, String tableName,
-                                                  List<String> partitionValues)
-        throws FalconException;
-
-    /**
-     * Gets the partition columns for the table in catalog service.
-     * @param conf
-     * @param catalogUrl url for the catalog service
-     * @param database
-     * @param tableName
-     * @return ordered list of partition columns for the table
-     * @throws FalconException
-     */
-    public abstract List<String> getPartitionColumns(Configuration conf, String catalogUrl, String database,
-                                                     String tableName) throws FalconException;
-
-    /**
-     * Adds the partition to the table.
-     * @param conf
-     * @param catalogUrl
-     * @param database
-     * @param tableName
-     * @param values
-     * @param location
-     * @throws FalconException
-     */
-    public abstract void addPartition(Configuration conf, String catalogUrl, String database,
-                                      String tableName, List<String> values, String location) throws FalconException;
-
-    /**
-     * Updates an existing partition in the table.
-     * @param conf
-     * @param catalogUrl
-     * @param database
-     * @param tableName
-     * @param partValues
-     * @param location
-     * @throws FalconException
-     */
-    public abstract void updatePartition(Configuration conf, String catalogUrl, String database, String tableName,
-                                         List<String> partValues, String location) throws FalconException;
-}


[10/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ClusterWizardPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ClusterWizardPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ClusterWizardPage.java
deleted file mode 100644
index 41fc120..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/ClusterWizardPage.java
+++ /dev/null
@@ -1,556 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.cluster.ACL;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Location;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.testng.Assert;
-
-import java.util.List;
-
-/** Page object of the Cluster creation page. */
-public class ClusterWizardPage extends EntityWizardPage {
-    private static final Logger LOGGER = Logger.getLogger(ClusterWizardPage.class);
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "clusterForm")
-    })
-    private WebElement clusterBox;
-    @FindBy(id = "cluster.step1")
-    private WebElement next;
-    @FindBy(id = "cluster.step2")
-    private WebElement save;
-    @FindBy(id = "cluster.backToStep1")
-    private WebElement previous;
-    @FindBy(xpath = "//a[contains(text(), 'Cancel')]")
-    private WebElement cancel;
-    @FindBy(xpath = "//div[contains(@class, 'clusterSummaryRow')][h4]")
-    private WebElement summaryBox;
-
-    public ClusterWizardPage(WebDriver driver) {
-        super(driver);
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(clusterBox, "Cluster box");
-    }
-
-    /**
-     * Fills cluster setup forms with values retrieved from the given ClusterMerlin object.
-     */
-    public void fillForm(ClusterMerlin cluster) {
-        setName(cluster.getName());
-        setColo(cluster.getColo());
-        setDescription(cluster.getDescription());
-        setTags(cluster.getTags());
-        ACL acl = cluster.getACL();
-        setOwner(acl.getOwner());
-        setGroup(acl.getGroup());
-        setPermissions(acl.getPermission());
-        for(Interface iface : cluster.getInterfaces().getInterfaces()) {
-            setInterface(iface);
-        }
-        for (Property property : cluster.getProperties().getProperties()) {
-            addProperty(property.getName(), property.getValue());
-        }
-        setLocations(cluster.getLocations().getLocations());
-        waitForAngularToFinish();
-    }
-
-    /**
-     * Methods to fill specific wizard fields.
-     */
-    public void setName(String name) {
-        WebElement nameInput = getNameInput();
-        nameInput.clear();
-        sendKeysSlowly(nameInput, name);
-    }
-
-    public String getName() {
-        return getNameInput().getAttribute("value");
-    }
-
-    private WebElement getNameInput() {
-        return driver.findElement(By.xpath("//div[label[text()='Name']]/input"));
-    }
-
-    public void setColo(String colo) {
-        WebElement coloInput = clusterBox.findElement(By.xpath("//div[label[text()='Colo']]/input"));
-        coloInput.clear();
-        sendKeysSlowly(coloInput, colo);
-    }
-
-    public void setDescription(String description) {
-        WebElement descriptionInput = clusterBox.findElement(By.xpath("//div[label[text()='Description']]/input"));
-        descriptionInput.clear();
-        sendKeysSlowly(descriptionInput, description);
-    }
-
-    public void setOwner(String owner) {
-        WebElement ownerInput = clusterBox.findElement(By.xpath("//div[label[text()='Owner']]/input"));
-        ownerInput.clear();
-        sendKeysSlowly(ownerInput, owner);
-    }
-
-    public void setGroup(String group) {
-        WebElement groupInput = clusterBox.findElement(By.xpath("//div[label[text()='Group']]/input"));
-        groupInput.clear();
-        sendKeysSlowly(groupInput, group);
-    }
-
-    public void setPermissions(String permissions) {
-        WebElement permissionsInput = clusterBox.findElement(By.xpath("//div[label[text()='Permissions']]/input"));
-        permissionsInput.clear();
-        sendKeysSlowly(permissionsInput, permissions);
-    }
-
-    /**
-     * Common method to fill interfaces.
-     */
-    public void setInterface(Interface iface) {
-        String xpath = "//input[contains(@ng-model,"
-            + " 'clusterEntity.clusterModel.cluster.interfaces.interface[%sPos]._endpoint')]";
-        WebElement ifaceEndpoint = clusterBox.findElement(By.xpath(String.format(xpath, iface.getType().value())));
-        ifaceEndpoint.clear();
-        sendKeysSlowly(ifaceEndpoint, iface.getEndpoint());
-        setInterfaceVersion(iface);
-    }
-
-    /**
-     * Set interface version by interface type.
-     */
-    public void setInterfaceVersion(Interface iface) {
-        WebElement ifaceVersion = getInterfaceVersionInput(iface.getType());
-        if (iface.getVersion() != null) {
-            ifaceVersion.clear();
-            sendKeysSlowly(ifaceVersion, iface.getVersion());
-        }
-    }
-
-    /**
-     * Get input for interface version by interface type.
-     */
-    private WebElement getInterfaceVersionInput(Interfacetype interfacetype) {
-        return clusterBox.findElement(By.xpath(String.format(
-            "//input[@ng-model='clusterEntity.clusterModel.cluster.interfaces.interface[%sPos]._version']",
-            interfacetype.value())));
-    }
-
-    /**
-     * Populates form with tags.
-     */
-    public void setTags(String tagsStr){
-        if (!StringUtils.isEmpty(tagsStr)) {
-            String [] tags = tagsStr.split(",");
-            for (int i = 0; i < tags.length; i++) {
-                if (i > 0){
-                    clickAddTag();
-                }
-                String key = tags[i].trim().split("=")[0];
-                String value = tags[i].trim().split("=")[1];
-                addTag(key, value);
-            }
-        }
-    }
-
-    /**
-     * Populates the last (empty) tag key and value fields.
-     * @param key tag key
-     * @param value tag value
-     */
-    public void addTag(String key, String value) {
-        List<WebElement> tagInputs = clusterBox.findElements(By.xpath("//input[@ng-model='tag.key']"));
-        List<WebElement> valueInputs = clusterBox.findElements(By.xpath("//input[@ng-model='tag.value']"));
-        WebElement tagInput = tagInputs.get(tagInputs.size() - 1);
-        sendKeysSlowly(tagInput, key);
-        WebElement valueInput = valueInputs.get(valueInputs.size() - 1);
-        sendKeysSlowly(valueInput, value);
-    }
-
-    public void clickAddTag() {
-        clusterBox.findElement(By.xpath("//button[contains(., 'add tag')]")).click();
-    }
-
-    public void clickDeleteTag() {
-        List<WebElement> buttons = clusterBox
-            .findElements(By.xpath("//div[@class='row dynamic-table-spacer ng-scope']//button[contains(.,'delete')]"));
-        buttons.get(buttons.size() - 1).click();
-    }
-
-    /**
-     * Fills property fields and creates new empty property fields.
-     */
-    public void addProperty(String name, String value) {
-        List<WebElement> propInputs = clusterBox.findElements(By.xpath("//input[@ng-model='property._name']"));
-        List<WebElement> valueInputs = clusterBox.findElements(By.xpath("//input[@ng-model='property._value']"));
-        WebElement propInput = propInputs.get(propInputs.size()-1);
-        sendKeysSlowly(propInput, name);
-        WebElement valueInput = valueInputs.get(valueInputs.size() - 1);
-        sendKeysSlowly(valueInput, value);
-        clickAddProperty();
-    }
-
-    public void clickAddProperty() {
-        clusterBox.findElement(By.xpath("//button[contains(., 'add property')]")).click();
-    }
-
-    /**
-     * Method to set locations. A location can be only one of ClusterLocationType, so an additional location is
-     * added only after the respective compulsory location has been filled.
-     * @param locations locations
-     */
-    public void setLocations(List<Location> locations) {
-        boolean staging = false, temp = false, working = false;
-        for(Location location : locations) {
-            WebElement pathInput = null;
-            if (location.getName() == ClusterLocationType.STAGING && !staging) {
-                pathInput = clusterBox.findElement(By.id("location.staging"));
-                staging = true;
-            } else  if (location.getName() == ClusterLocationType.TEMP && !temp) {
-                pathInput = clusterBox.findElement(By.id("location.temp"));
-                temp = true;
-            } else if (location.getName() == ClusterLocationType.WORKING && !working) {
-                pathInput = clusterBox.findElement(By.id("location.working"));
-                working = true;
-            } else {
-                fillAdditionalLocation(location);
-            }
-            if (pathInput != null) {
-                pathInput.clear();
-                sendKeysSlowly(pathInput, location.getPath());
-            }
-        }
-    }
-
-    /**
-     * Method populates the last (empty) location fields with values.
-     */
-    public void fillAdditionalLocation(Location location) {
-        List<WebElement> allNameInputs = clusterBox
-            .findElements(By.xpath("//input[contains(@ng-model, 'location._name')]"));
-        sendKeysSlowly(allNameInputs.get(allNameInputs.size() - 1), location.getName().value());
-        List<WebElement> allPathInputs = clusterBox
-            .findElements(By.xpath("//input[contains(@ng-model, 'location._path')]"));
-        sendKeysSlowly(allPathInputs.get(allPathInputs.size() - 1), location.getPath());
-    }
-
-    public void clickAddLocation() {
-        clusterBox.findElement(By.xpath("//button[contains(., 'add location')]")).click();
-    }
-
-    public void clickDeleteLocation() {
-        List<WebElement> buttons = clusterBox
-            .findElements(By.xpath("//div[@class='row ng-scope']//button[contains(.,'delete')]"));
-        Assert.assertFalse(buttons.isEmpty(), "Delete button should be present.");
-        buttons.get(buttons.size() - 1).click();
-    }
-
-    public boolean checkElementByContent(String elementTag, String content) {
-        List<WebElement> elements = clusterBox.findElements(By.xpath("//" + elementTag));
-        for(WebElement element : elements) {
-            if (element.getAttribute("value").equals(content)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Method to assert that the error is displayed when the Staging and Working locations are the same.
-     */
-    public void assertLocationsEqualError(){
-
-        // Assertion for Staging Location.
-        LOGGER.info(" Assertion for Staging Directory ");
-        Assert.assertTrue(checkErrorMessageByElement("input[contains(@id,'location.staging')]//following-sibling::"
-                + "span[contains(@ng-show, 'locationsEqualError')]",
-                "Staging and Working location should be different"));
-
-        // Assertion for Working Location.
-        LOGGER.info("Assertion for Working Directory");
-        Assert.assertTrue(checkErrorMessageByElement("input[contains(@id,'location.working')]//following-sibling::"
-                + "span[contains(@ng-show, 'locationsEqualError')]",
-                "Staging and Working location should be different"));
-    }
-
-    /**
-     * Method to get the error text message displayed for the given xpath and compare it
-     * with the input string parameter errMessage.
-     * @param elementTag elementTag
-     * @param errMessage errMessage
-     */
-    public boolean checkErrorMessageByElement(String elementTag, String errMessage) {
-
-        List<WebElement> elements = clusterBox.findElements(By.xpath("//" + elementTag));
-        if (!elements.isEmpty()){
-            for (WebElement element : elements) {
-                Assert.assertEquals(element.getText(), errMessage);
-                LOGGER.info("Error Message Displayed : " + element.getText());
-            }
-            return true;
-        }else{
-            LOGGER.info(" No Elements found with the xpath " + elementTag);
-            return false;
-        }
-    }
-
-    /**
-     * Retrieves the value of the summary box and parses it to cluster properties.
-     * @param draft empty cluster to contain all properties.
-     * @return cluster filled with properties from the summary.
-     */
-    public ClusterMerlin getSummary(ClusterMerlin draft) {
-        ClusterMerlin cluster = new ClusterMerlin(draft.toString());
-        String summaryBoxText = summaryBox.getText();
-        LOGGER.info("Summary block text : " + summaryBoxText);
-
-        String[] slices;
-        String value;
-        String path;
-        String label;
-
-        //retrieve basic properties
-        String basicProps = summaryBoxText.split("ACL")[0];
-        for (String line : basicProps.split("\\n")) {
-            slices = line.split(" ");
-            label = slices[0].replace(":", "").trim();
-            value = getValueFromSlices(slices, line);
-            switch (label) {
-            case "Name":
-                cluster.setName(value);
-                break;
-            case "Colo":
-                cluster.setColo(value);
-                break;
-            case "Description":
-                cluster.setDescription(value);
-                break;
-            case "Tags":
-                cluster.setTags(value);
-                break;
-            default:
-                break;
-            }
-        }
-        //retrieve ACL
-        String propsLeft = summaryBoxText.split("ACL")[1];
-        String[] acl = propsLeft.split("Interfaces")[0].split(" ");
-        cluster.getACL().setOwner(acl[1]);
-        cluster.getACL().setGroup(acl[3]);
-        cluster.getACL().setPermission(acl[5].trim());
-
-        //retrieve interfaces
-        propsLeft = propsLeft.split("Interfaces")[1];
-        boolean propertiesPresent = propsLeft.contains("Properties");
-        String nextLabel = propertiesPresent ? "Properties" : "Locations";
-        String interfaces = propsLeft.split(nextLabel)[0].trim();
-        for (String line : interfaces.split("\\n")) {
-            slices = line.split(" ");
-            label = slices[0].replace(":", "").trim();
-            String endpoint = slices[1].trim();
-            String version = slices[3].trim();
-            switch (label) {
-            case "readonly":
-                cluster.addInterface(Interfacetype.READONLY, endpoint, version);
-                break;
-            case "write":
-                cluster.addInterface(Interfacetype.WRITE, endpoint, version);
-                break;
-            case "execute":
-                cluster.addInterface(Interfacetype.EXECUTE, endpoint, version);
-                break;
-            case "workflow":
-                cluster.addInterface(Interfacetype.WORKFLOW, endpoint, version);
-                break;
-            case "messaging":
-                cluster.addInterface(Interfacetype.MESSAGING, endpoint, version);
-                break;
-            case "registry":
-                cluster.addInterface(Interfacetype.REGISTRY, endpoint, version);
-                break;
-            default:
-                break;
-            }
-        }
-        //retrieve properties
-        if (propertiesPresent) {
-            propsLeft = propsLeft.split("Properties")[1];
-            String properties = propsLeft.split("Locations")[0].trim();
-            for (String line : properties.split("\\n")) {
-                int indx = line.indexOf(":");
-                String name = line.substring(0, indx).trim();
-                value = line.substring(indx + 1, line.length()).trim();
-                cluster.withProperty(name, value);
-            }
-        }
-        //retrieve locations
-        propsLeft = propsLeft.split("Locations")[1].trim();
-        for (String line : propsLeft.split("\\n")) {
-            slices = line.split(" ");
-            label = slices[0].replace(":", "").trim();
-            path = getValueFromSlices(slices, line);
-            switch (label) {
-            case "staging":
-                cluster.addLocation(ClusterLocationType.STAGING, path);
-                break;
-            case "temp":
-                cluster.addLocation(ClusterLocationType.TEMP, path);
-                break;
-            default:
-                cluster.addLocation(ClusterLocationType.WORKING, path);
-                break;
-            }
-        }
-        return cluster;
-    }
-
-    /**
-     * Clicks on cancel button.
-     */
-    public void cancel() {
-        cancel.click();
-    }
-
-    /**
-     *  Clicks on the next button, which is the same as finishing step 1.
-     */
-    public void clickNext() {
-        next.click();
-        waitForAngularToFinish();
-        Assert.assertTrue(summaryBox.isDisplayed(), "Summary box should be displayed.");
-    }
-
-    /**
-     *  Click on next button in the cluster creation page.
-     */
-    public void clickJustNext() {
-        next.click();
-        waitForAngularToFinish();
-    }
-
-    /**
-     * Click on save button.
-     */
-    public void clickSave() {
-        save.click();
-        waitForAlert();
-    }
-
-    /**
-     * Clicks on previous button.
-     */
-    public void clickPrevious() {
-        previous.click();
-        waitForAngularToFinish();
-        UIAssert.assertDisplayed(clusterBox, "Cluster box");
-    }
-
-    /**
-     * Method imitates a click on the check box.
-     * @param expectedState whether the check box is expected to be enabled after the click.
-     */
-    public void checkRegistry(boolean expectedState) {
-        WebElement checkbox = clusterBox.findElement(By.xpath("//input[@type='checkbox']"));
-        clickCheckBoxSecurely(checkbox, expectedState);
-        waitForAngularToFinish();
-    }
-
-    public WebElement getInterfaceEndpoint(Interfacetype interfacetype) {
-        String xpath = String.format("//input[@ng-model='clusterEntity.clusterModel.cluster.interfaces"
-            + ".interface[%sPos]._endpoint']", interfacetype.value());
-        return clusterBox.findElement(By.xpath(xpath));
-    }
-
-    public String getInterfaceEndpointValue(Interfacetype interfacetype) {
-        return getInterfaceEndpoint(interfacetype).getAttribute("value");
-    }
-
-    public WebElement getInterfaceVersion(Interfacetype interfacetype) {
-        String xpath = String.format("//input[@ng-model='clusterEntity.clusterModel.cluster.interfaces"
-            + ".interface[%sPos]._version']", interfacetype.value());
-        return clusterBox.findElement(By.xpath(xpath));
-    }
-
-    public String getInterfaceVersionValue(Interfacetype interfacetype) {
-        return getInterfaceVersion(interfacetype).getAttribute("value");
-    }
-
-    /**
-     * Helper method that returns the value part of a summary line, or an empty string when the line has no value.
-     */
-    public String getValueFromSlices(String[] slices, String line) {
-        String trimValue;
-        if (slices[0].length()==(line.length())) {
-            trimValue = "";
-        }else {
-            trimValue = slices[1].trim();
-        }
-        return trimValue;
-    }
-
-    /**
-     * Checks whether registry interface is enabled for input or not.
-     */
-    public boolean isRegistryEnabled() {
-        return getInterfaceEndpoint(Interfacetype.REGISTRY).isEnabled()
-            && getInterfaceVersion(Interfacetype.REGISTRY).isEnabled();
-    }
-
-    private WebElement getNameUnavailable(){
-        return clusterBox.findElement(By.xpath(
-            "//div[contains(@class, 'nameInputDisplay') and contains(@class, 'custom-danger')]"));
-    }
-
-    public void checkNameUnavailableDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getNameUnavailable(), "Name Unavailable not displayed");
-        }else {
-            try{
-                getNameUnavailable();
-                Assert.fail("Name Unavailable found");
-            } catch (Exception ex){
-                LOGGER.info("Name Unavailable not found");
-            }
-        }
-    }
-
-    @Override
-    public WebElement getEditXMLButton() {
-        return driver.findElement(By.id("cluster.editXML"));
-    }
-
-    @Override
-    public ClusterMerlin getEntityFromXMLPreview() {
-        return new ClusterMerlin(getXMLPreview());
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityPage.java
deleted file mode 100644
index 98bf9b5..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityPage.java
+++ /dev/null
@@ -1,692 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import com.google.common.collect.ImmutableMap;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.WordUtils;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.Keys;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.PageFactory;
-import org.openqa.selenium.support.ui.Select;
-import org.testng.asserts.SoftAssert;
-
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-
-import static org.testng.Assert.assertEquals;
-import static org.testng.Assert.assertNotNull;
-import static org.testng.Assert.assertTrue;
-
-/**
- * Class representation of Search UI entity page.
- */
-public class EntityPage extends AbstractSearchPage {
-    private static final Logger LOGGER = Logger.getLogger(EntityPage.class);
-
-    /**
-     * Possible instance actions available on entity page.
-     */
-    public enum InstanceAction {
-        Log,
-        Resume,
-        Rerun,
-        Suspend,
-        Kill
-    }
-
-    public EntityPage(WebDriver driver) {
-        super(driver);
-    }
-
-    private WebElement getEntityTitle() {
-        final WebElement title = driver.findElement(By.id("entity-title"));
-        UIAssert.assertDisplayed(title, "entity title");
-        return title;
-    }
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "dependencies-graph")
-    })
-    private WebElement dependencyBox;
-
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(xpath = "(.//*[contains(@class, 'detailsBox')])[2]")
-    })
-    private WebElement instanceListBox;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "summaryBox")
-    })
-    private WebElement propertiesBlock;
-
-    public String getEntityName() {
-        UIAssert.assertDisplayed(getEntityTitle(), "Entity title");
-        return getEntityTitle().getText().split(" ")[0];
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(dependencyBox, "Dependency box");
-        UIAssert.assertDisplayed(instanceListBox, "Instance list box");
-        UIAssert.assertDisplayed(propertiesBlock, "Summary box");
-    }
-
-    public EntityPage refreshPage() {
-        final String entityName = getEntityName();
-        SearchPage searchPage = getPageHeader().gotoHome();
-        return searchPage.openEntityPage(entityName);
-    }
-
-    public void checkFeedProperties(FeedMerlin feed) {
-        openProperties();
-
-        final WebElement propertiesBox =
-            propertiesBlock.findElement(By.xpath("//div[@ui-view='feedSummary']"));
-        UIAssert.assertDisplayed(propertiesBox, "Properties box");
-
-        //all the parts of the entity properties
-        final List<WebElement> propertyParts = propertiesBox.findElements(By.xpath("./div"));
-        //First set of properties
-        final WebElement generalBox = propertyParts.get(0);
-        final List<WebElement> generalParts = generalBox.findElements(By.xpath("./div"));
-        SoftAssert softAssert = new SoftAssert();
-        //General
-        softAssert.assertEquals(generalParts.get(0).getText(), "General", "Unexpected heading");
-        final List<WebElement> nameAndDesc = generalParts.get(1).findElements(By.xpath("./div"));
-        softAssert.assertEquals(nameAndDesc.get(0).getText(), "Name: " + feed.getName(),
-            "Unexpected feed name in properties.");
-        softAssert.assertEquals(nameAndDesc.get(1).getText(), "Description: " + feed.getDescription(),
-            "Unexpected description in properties.");
-        //Tags
-        softAssert.assertEquals(generalParts.get(2).getText(), "Tags", "Unexpected heading");
-        softAssert.assertEquals(generalParts.get(3).getText(),
-            StringUtils.trimToEmpty(feed.getTags()),
-            "Unexpected tags");
-        //Groups
-        softAssert.assertEquals(generalParts.get(4).getText(), "Groups", "Unexpected heading");
-        softAssert.assertEquals(generalParts.get(5).getText(),
-            StringUtils.trimToEmpty(feed.getGroups()),
-            "Unexpected groups");
-        //Access Control list
-        softAssert.assertEquals(generalParts.get(6).getText(), "Access Control List",
-            "Unexpected heading");
-        final List<WebElement> ownerGrpPerm = generalParts.get(7).findElements(By.xpath("./div"));
-        softAssert.assertEquals(ownerGrpPerm.get(0).getText(),
-            "Owner: " + feed.getACL().getOwner(), "Unexpected owner");
-        softAssert.assertEquals(ownerGrpPerm.get(1).getText(),
-            "Group: " + feed.getACL().getGroup(), "Unexpected group");
-        softAssert.assertEquals(ownerGrpPerm.get(2).getText(),
-            "Permissions: " + feed.getACL().getPermission(), "Unexpected permission");
-        //Schema
-        softAssert.assertEquals(generalParts.get(8).getText(), "Schema",
-            "Unexpected heading for general properties");
-        final List<WebElement> locAndProvider = generalParts.get(9).findElements(By.xpath("./div"));
-        softAssert.assertEquals(locAndProvider.get(0).getText(),
-            "Location: " + feed.getSchema().getLocation(), "Unexpected schema locations");
-        softAssert.assertEquals(locAndProvider.get(1).getText(),
-            "Provider: " + feed.getSchema().getProvider(), "Unexpected schema provider");
-        //Properties
-        softAssert.assertEquals(generalParts.get(10).getText(), "Properties",
-            "Unexpected heading for general properties");
-        final List<WebElement> freqLateAvail = generalParts.get(11).findElements(By.xpath("./div"));
-        final Frequency feedFrequency = feed.getFrequency();
-        softAssert.assertEquals(freqLateAvail.get(0).getText(),
-            String.format("Frequency: Every %s %s",
-                feedFrequency.getFrequency(), feedFrequency.getTimeUnit()),
-            "Unexpected frequency");
-        final Frequency feedLateCutoff = feed.getLateArrival().getCutOff();
-        softAssert.assertEquals(freqLateAvail.get(1).getText(),
-            String.format("Late Arrival: Up to %s %s",
-                feedLateCutoff.getFrequency(), feedLateCutoff.getTimeUnit()),
-            "Unexpected late arrival");
-        softAssert.assertEquals(freqLateAvail.get(2).getText(),
-            String.format("Availability Flag:%s",
-                StringUtils.trimToEmpty(feed.getAvailabilityFlag())),
-            "Unexpected availability flag");
-        final List<WebElement> propertyElements =
-            generalParts.get(12).findElements(By.xpath("./div"));
-        List<String> displayedPropStr = new ArrayList<>();
-        for (WebElement webElement : propertyElements) {
-            displayedPropStr.add(webElement.getText());
-        }
-        Collections.sort(displayedPropStr);
-        final List<String> expectedPropStr = getFeedPropString(feed);
-        softAssert.assertEquals(displayedPropStr, expectedPropStr,
-            "Feed properties & displayed properties don't match. Expected: " + expectedPropStr
-                + " Actual: " + displayedPropStr);
-        //Storage type
-        softAssert.assertEquals(generalParts.get(13).getText(), "Default Storage Type:",
-            "Unexpected label for storage type.");
-        if (feed.getLocations() != null
-            && feed.getLocations().getLocations() != null
-            && feed.getLocations().getLocations().size() > 0) {
-            softAssert.assertEquals(generalParts.get(13).getText(), "File System",
-                "Unexpected storage type for feed.");
-        } else {
-            softAssert.fail("Need to add handler for other feed types.");
-        }
-        //Feed locations - Data followed by Stats followed by Meta
-        softAssert.assertEquals(generalParts.get(14).getText(), "Default Location:",
-            "Unexpected label for default location.");
-        softAssert.assertEquals(generalParts.get(15).getText(),
-            "Data\n" + feed.getFeedPath(LocationType.DATA),
-            "Unexpected label for feed data label");
-        softAssert.assertEquals(generalParts.get(16).getText(),
-            "Stats\n" + feed.getFeedPath(LocationType.STATS),
-            "Unexpected label for feed stats label");
-        softAssert.assertEquals(generalParts.get(17).getText(),
-            "Meta\n" + feed.getFeedPath(LocationType.META),
-            "Unexpected label for feed mata label");
-
-        //Second set of properties details with Source Cluster Properties
-        final WebElement clustersBox = propertyParts.get(1);
-        final List<WebElement> displayedClusters = clustersBox.findElements(By.xpath("./div"));
-        final List<Cluster> feedClusters = feed.getClusters().getClusters();
-        //test needs to be fixed when we have support for more than one feed cluster
-        softAssert.assertEquals(feedClusters.size(), 1,
-            "Current UI has support for only one feed cluster.");
-        checkFeedCluster(displayedClusters.get(0), feedClusters.get(0), softAssert);
-        softAssert.assertAll();
-    }
-
-    private void openProperties() {
-        final WebElement heading = propertiesBlock.findElement(By.tagName("h4"));
-        assertEquals(heading.getText(), "Properties",
-            "Unexpected heading of properties box.");
-        final WebElement upButton = propertiesBlock.findElement(By.className("pointer"));
-        upButton.click();
-    }
-
-    private void checkFeedCluster(WebElement cluster, Cluster feedCluster, SoftAssert softAssert) {
-        final List<WebElement> clusterElements = cluster.findElements(By.xpath("./div"));
-        final String vClusterName = clusterElements.get(1).getText();
-        softAssert.assertNotNull(feedCluster,
-            "Unexpected feed cluster is displayed: " + vClusterName);
-        final String clusterType = clusterElements.get(0).getText();
-        softAssert.assertEquals(clusterType,
-            WordUtils.capitalize(feedCluster.getType().toString().toLowerCase() + " Cluster"),
-            "Unexpected cluster type for cluster: " + vClusterName);
-        softAssert.assertEquals(clusterElements.get(2).getText(),
-            "Start: " + feedCluster.getValidity().getStart()
-                + "\nEnd: " + feedCluster.getValidity().getEnd(),
-            "Unexpected validity of the cluster: " + vClusterName);
-        softAssert.assertEquals(clusterElements.get(3).getText(), "Timezone: UTC",
-            "Unexpected timezone for validity of the cluster: " + vClusterName);
-        softAssert.assertEquals(clusterElements.get(4).getText(),
-            "Retention: Archive in " + feedCluster.getRetention().getLimit().getFrequency()
-                + " " + feedCluster.getRetention().getLimit().getTimeUnit(),
-            "Unexpected retention associated with cluster: " + vClusterName);
-    }
-
-    private List<String> getFeedPropString(FeedMerlin feed) {
-        List<String> retVals = new ArrayList<>();
-        for (Property property : feed.getProperties().getProperties()) {
-            retVals.add(property.getName() + ": " + property.getValue());
-        }
-        Collections.sort(retVals);
-        return retVals;
-    }
-
-    public void checkProcessProperties(ProcessMerlin process) {
-        openProperties();
-
-        final WebElement propertiesBox =
-            propertiesBlock.findElement(By.xpath("//div[@ui-view='processSummary']"));
-        UIAssert.assertDisplayed(propertiesBox, "Properties box");
-        final List<WebElement> propertiesParts = propertiesBox.findElements(By.xpath("./div"));
-        final WebElement generalPropBlock = propertiesParts.get(0);
-        final WebElement clusterPropBlock = propertiesParts.get(1);
-        final WebElement inputPropBlock = propertiesParts.get(2);
-        final WebElement outputPropBlock = propertiesParts.get(3);
-
-        //checking general properties
-        final List<WebElement> generalPropParts =
-            generalPropBlock.findElement(By.xpath("./*")).findElements(By.xpath("./*"));
-        SoftAssert softAssert = new SoftAssert();
-        softAssert.assertEquals(generalPropParts.get(0).getText(), "Process",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(1).getText(), "Name",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(2).getText(), process.getName(),
-            "Unexpected process name in general properties.");
-        softAssert.assertEquals(generalPropParts.get(3).getText(), "Tags",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(4).getText(),
-            StringUtils.defaultIfBlank(process.getTags(), "No tags selected"),
-            "Unexpected tags in general properties.");
-        softAssert.assertEquals(generalPropParts.get(5).getText(), "Workflow",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(6).getText(), "Name\nEngine\nVersion",
-            "Unexpected workflow properties in general properties.");
-        softAssert.assertEquals(generalPropParts.get(7).getText(),
-            String.format("%s%n%s%n%s",
-                StringUtils.defaultIfBlank(process.getWorkflow().getName(), ""),
-                process.getWorkflow().getEngine(), process.getWorkflow().getVersion()),
-            "Unexpected workflow properties in general properties.");
-        softAssert.assertEquals(generalPropParts.get(8).getText(), "Path",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(9).getText(), process.getWorkflow().getPath(),
-            "Unexpected workflow path in general properties.");
-        softAssert.assertEquals(generalPropParts.get(10).getText(), "Timing",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(11).getText(), "Timezone",
-            "Unexpected label in general properties.");
-        softAssert.assertEquals(generalPropParts.get(12).getText(),
-            String.format("Frequency%nEvery %s %s%n", process.getFrequency().getFrequency(),
-                process.getFrequency().getTimeUnit())
-                + "Max. parallel instances\n" + process.getParallel()
-                + "\nOrder\n" + process.getOrder().toString(),
-            "Unexpected frequency/parallel/order info in general properties.");
-        softAssert.assertEquals(generalPropParts.get(13).getText(), "Retry",
-            "Unexpected label in general properties.");
-        final Retry processRetry = process.getRetry();
-        softAssert.assertEquals(generalPropParts.get(14).getText(),
-            "Policy\n" + processRetry.getPolicy().toString().toLowerCase()
-                + "\nAttempts\n" + processRetry.getAttempts()
-                + "\nDelay\nUp to " + processRetry.getDelay().getFrequency()
-                + " " + processRetry.getDelay().getTimeUnit(),
-            "Unexpected policy/attempt/delay in general properties.");
-
-        //checking cluster properties
-        final List<WebElement> allClusterProps =
-            clusterPropBlock.findElements(By.xpath("./div/div/div"));
-        final WebElement clustersHeading = clusterPropBlock.findElement(By.xpath(".//h5"));
-        softAssert.assertEquals(clustersHeading.getText(), "Clusters",
-            "Unexpected label in clusters heading");
-        for (WebElement oneClusterProp : allClusterProps) {
-            final List<WebElement> clusterPropParts = oneClusterProp.findElements(By.xpath("./*"));
-            softAssert.assertEquals(clusterPropParts.get(0).getText(), "Name",
-                "Unexpected label in clusters properties");
-            final String clusterName = clusterPropParts.get(1).getText();
-            final org.apache.falcon.entity.v0.process.Cluster processCluster =
-                process.getClusterByName(clusterName);
-            softAssert.assertNotNull(processCluster,
-                "cluster with name " + clusterName + " was not present in process.");
-            softAssert.assertEquals(clusterName, processCluster.getName(),
-                "Unexpected cluster name in clusters properties");
-            softAssert.assertEquals(clusterPropParts.get(2).getText(), "Validity",
-                "Unexpected label in clusters properties");
-            softAssert.assertEquals(clusterPropParts.get(3).getText(),
-                "Start\n" + processCluster.getValidity().getStart()
-                + "\nEnd\n" + processCluster.getValidity().getEnd(),
-                "Unexpected start/end time in clusters properties");
-        }
-        //checking inputs properties
-        final WebElement inputHeading = inputPropBlock.findElement(By.xpath(".//h5"));
-        softAssert.assertEquals(inputHeading.getText(), "Inputs",
-            "Unexpected heading for input properties.");
-        final List<WebElement> allInputsProps =
-            inputPropBlock.findElements(By.xpath("./div/div/*"));
-        for (WebElement oneInputProps : allInputsProps) {
-            final List<WebElement> inputPropParts = oneInputProps.findElements(By.xpath("./*"));
-            softAssert.assertEquals(inputPropParts.get(0).getText(), "Name",
-                "Unexpected label in input properties");
-            final String inputName = inputPropParts.get(1).getText();
-            final Input processInput = process.getInputByName(inputName);
-            softAssert.assertEquals(inputName, processInput.getName(),
-                "Unexpected input name in input properties");
-            softAssert.assertEquals(inputPropParts.get(2).getText(), "Feed",
-                "Unexpected label in input properties");
-            softAssert.assertEquals(inputPropParts.get(3).getText(), processInput.getFeed(),
-                "Unexpected feed name in input properties");
-            softAssert.assertEquals(inputPropParts.get(4).getText(), "Instance",
-                "Unexpected label in input properties");
-            softAssert.assertEquals(inputPropParts.get(5).getText(),
-                "Start\n" + processInput.getStart() + "\nEnd\n" + processInput.getEnd(),
-                "Unexpected start/end in input properties");
-        }
-        final WebElement outputHeading = outputPropBlock.findElement(By.tagName("h5"));
-        softAssert.assertEquals(outputHeading.getText(), "Outputs",
-            "Unexpected label for output properties.");
-        final List<WebElement> allOutputsProps =
-            outputPropBlock.findElements(By.xpath("./div/div/*"));
-        for (WebElement oneOutputProps : allOutputsProps) {
-            final List<WebElement> outputPropParts = oneOutputProps.findElements(By.xpath("./*"));
-            softAssert.assertEquals(outputPropParts.get(0).getText(), "Name",
-                "Unexpected label in output properties");
-            final String outputName = outputPropParts.get(1).getText();
-            final Output processOutput = process.getOutputByName(outputName);
-            softAssert.assertEquals(outputName, processOutput.getName(),
-                "Unexpected output name in output properties");
-            softAssert.assertEquals(outputPropParts.get(2).getText(), "Feed",
-                "Unexpected label in output properties");
-            softAssert.assertEquals(outputPropParts.get(3).getText(), processOutput.getFeed(),
-                "Unexpected feed name in output properties");
-            softAssert.assertEquals(outputPropParts.get(4).getText(), "Instance",
-                "Unexpected label in output properties");
-            softAssert.assertEquals(outputPropParts.get(5).getText(), processOutput.getInstance(),
-                "Unexpected instance in output properties");
-        }
-        softAssert.assertAll();
-    }
-
-    public void performActionOnSelectedInstances(InstanceAction instanceAction) {
-        driver.findElement(By.xpath(String.format("//td/div[%d]", instanceAction.ordinal() + 1))).click();
-        waitForAngularToFinish();
-        //timeout to refresh a view
-        TimeUtil.sleepSeconds(2);
-    }
-
-    public InstanceSummary getInstanceSummary() {
-        return new InstanceSummary(this);
-    }
-
-    /**
-     * Class representing all the displayed instances.
-     */
-    public static class InstanceSummary {
-        private final WebElement instanceListBox;
-        private final WebElement summaryTableHeading;
-
-        public InstanceSummary(EntityPage entityPage) {
-            instanceListBox = entityPage.instanceListBox;
-            UIAssert.assertDisplayed(instanceListBox, "instance list box");
-            assertEquals(instanceListBox.findElement(By.tagName("h4")).getText(),
-                "Instances",
-                "Unexpected heading in instances box.");
-
-            summaryTableHeading = instanceListBox.findElement(By.xpath(".//thead/tr"));
-        }
-
-        private List<WebElement> getTableRows() {
-            return instanceListBox.findElements(By.xpath(".//tbody/tr"));
-        }
-
-        /**
-         * Get instance summary across all the pages.
-         * @return instance summary
-         */
-        public List<OneInstanceSummary> getSummary() {
-            List<OneInstanceSummary> summary = new ArrayList<>();
-            final List<WebElement> tableBody = getTableRows();
-            //last line has page number
-            final WebElement pageNumberRow = tableBody.remove(tableBody.size() - 1);
-            final List<WebElement> pages = pageNumberRow.findElement(By.className("pagination"))
-                .findElements(By.className("ng-scope"));
-            final int numberOfPages = pages.size();
-            for (int pageNumber = 1; pageNumber <= numberOfPages; ++pageNumber) {
-                //We want to use new web elements to avoid stale element issues
-                final List<WebElement> newTableBody = getTableRows();
-                //last line has page number
-                final WebElement newPageNumberRow = newTableBody.remove(newTableBody.size() - 1);
-                final List<WebElement> newPages =
-                    newPageNumberRow.findElement(By.className("pagination"))
-                        .findElements(By.className("ng-scope"));
-                newPages.get(pageNumber-1).findElement(By.tagName("a")).click();
-                summary.addAll(getSummaryInner());
-            }
-            return summary;
-        }
-
-        /**
-         * Get instance summary for the current page.
-         * @return instance summary
-         */
-        private List<OneInstanceSummary> getSummaryInner() {
-            List<OneInstanceSummary> summary = new ArrayList<>();
-            final List<WebElement> tableBody = getTableRows();
-            //first line in body has buttons
-            tableBody.remove(0);
-            //last line has page number
-            tableBody.remove(tableBody.size() - 1);
-            //second last line is horizontal line
-            tableBody.remove(tableBody.size() - 1);
-            if (tableBody.size() == 1
-                && tableBody.get(0).getText().equals("There are no results")) {
-                return summary;
-            }
-            for (WebElement oneSummaryRow : tableBody) {
-                summary.add(new OneInstanceSummary(oneSummaryRow));
-            }
-            return summary;
-        }
-
-        public void check() {
-            final List<WebElement> summaryHeadParts = getSummaryHeadParts();
-            getSelectAllCheckBox(summaryHeadParts);
-            final WebElement instanceHeadLabel = summaryHeadParts.get(1);
-            assertEquals(instanceHeadLabel.getText(), "Instance",
-                "Unexpected label in instance summary heading");
-            getSummaryStartedButton();
-            getSummaryEndedButton();
-            getStatusDropDown();
-        }
-
-        public void setInstanceSummaryStartTime(String timeStr) {
-            final WebElement startTimeButton = getSummaryStartedButton();
-            startTimeButton.clear();
-            sendKeysSlowly(startTimeButton, timeStr);
-            startTimeButton.sendKeys(Keys.ENTER);
-        }
-
-        public void setInstanceSummaryEndTime(String timeStr) {
-            final WebElement endTimeButton = getSummaryEndedButton();
-            endTimeButton.clear();
-            sendKeysSlowly(endTimeButton, timeStr);
-            endTimeButton.sendKeys(Keys.ENTER);
-        }
-
-        public void selectInstanceSummaryStatus(String labelText) {
-            getStatusDropDown().selectByVisibleText(labelText);
-        }
-
-        public static OneInstanceSummary getOneSummary(final List<OneInstanceSummary> summaries,
-                                                       final String nominalTime) {
-            for (OneInstanceSummary oneSummary : summaries) {
-                if (oneSummary.getNominalTime().equals(nominalTime)) {
-                    return oneSummary;
-                }
-            }
-            return null;
-        }
-
-        public void checkSummary(InstancesResult.Instance[] apiSummary) {
-            final List<OneInstanceSummary> summary = getSummary();
-            assertEquals(apiSummary.length, summary.size(),
-                String.format("Length of the displayed instance summary doesn't match: %s %s",
-                    Arrays.toString(apiSummary), summary));
-            for (InstancesResult.Instance oneApiSummary : apiSummary) {
-                final OneInstanceSummary oneSummary =
-                    getOneSummary(summary, oneApiSummary.instance);
-                assertEquals(oneApiSummary.instance, oneSummary.getNominalTime(),
-                    "Nominal time of instance summary doesn't match.");
-                final SimpleDateFormat dateFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm");
-                final Date apiStartTime = oneApiSummary.getStartTime();
-                if (apiStartTime == null) {
-                    assertTrue(StringUtils.isEmpty(oneSummary.getStartTime()),
-                        "Displayed start time : " + oneSummary + " is not "
-                            + "consistent with start time of api which is null");
-                } else {
-                    assertEquals(oneSummary.getStartTime(), dateFormat.format(apiStartTime),
-                        "Displayed start time : " + oneSummary + " is not "
-                            + "consistent with start time of api: " + apiStartTime);
-                }
-                final Date apiEndTime = oneApiSummary.getEndTime();
-                if (apiEndTime == null) {
-                    assertTrue(StringUtils.isEmpty(oneSummary.getEndTime()),
-                        "Displayed end time : " + oneSummary + " is not "
-                            + "consistent with end time of api which is null");
-                } else {
-                    assertEquals(oneSummary.getEndTime(), dateFormat.format(apiEndTime),
-                        "Displayed end time : " + oneSummary + " is not "
-                            + "consistent with end time of api: " + apiEndTime);
-                }
-                assertEquals(oneApiSummary.status.toString(), oneSummary.getStatus(),
-                    "Status of instance summary doesn't match.");
-            }
-        }
-
-        public WebElement getSummaryStartedButton() {
-            final WebElement startedBox = getSummaryHeadParts().get(2);
-            assertEquals(startedBox.getText(), "Started ",
-                "Unexpected label in instance summary heading");
-            return startedBox.findElement(By.tagName("input"));
-        }
-
-        public WebElement getSummaryEndedButton() {
-            final WebElement endedBox = getSummaryHeadParts().get(3);
-            assertEquals(endedBox.getText(), "Ended ",
-                "Unexpected label in instance summary heading");
-            return endedBox.findElement(By.tagName("input"));
-        }
-
-        public Select getStatusDropDown() {
-            final WebElement statusBox = getSummaryHeadParts().get(4);
-            assertEquals(statusBox.getText(),
-                "Status \nALL\nRUNNING\nSUCCEEDED\nSUSPENDED\nWAITING\nKILLED",
-                "Unexpected label in instance summary heading");
-            return new Select(statusBox.findElement(By.tagName("select")));
-        }
-
-        public List<WebElement> getSummaryHeadParts() {
-            return summaryTableHeading.findElements(By.xpath("./th/div"));
-        }
-
-        public WebElement getSelectAllCheckBox(List<WebElement> summaryHeadParts) {
-            return summaryHeadParts.get(0).findElement(By.tagName("input"));
-        }
-    }
-
-
-    public InstancePage openInstance(String nominalTime) {
-        instanceListBox.findElement(By.xpath("//button[contains(.,'" + nominalTime + "')]")).click();
-        return PageFactory.initElements(driver, InstancePage.class);
-    }
-
-    /**
-     * Class representing summary of one instance.
-     */
-    public static final class OneInstanceSummary {
-        private final WebElement oneInstanceSummary;
-        private final String startTime;
-        private final String endTime;
-        private final String status;
-        private final String nominalTime;
-
-        private final Map<Object, Object> statusColorMap = ImmutableMap.builder()
-            .put("WAITING", "rgba(51, 51, 51, 1)")
-            .put("RUNNING", "")
-            .put("KILLED", "")
-            .put("SUCCEEDED", "")
-            .put("SUSPENDED", "")
-            .put("FAILED", "").build();
-        private boolean isCheckBoxTicked;
-
-        private OneInstanceSummary(WebElement oneInstanceSummary) {
-            this.oneInstanceSummary = oneInstanceSummary;
-            nominalTime = getNominalTimeButton().getText();
-            startTime = getSummaryCols().get(2).getText();
-            endTime = getSummaryCols().get(3).getText();
-
-            final WebElement statusElement = getSummaryCols().get(4);
-            assertTrue(statusElement.isDisplayed(), "Status should be displayed");
-            final String statusText = statusElement.getText();
-            final Object expectedColor = statusColorMap.get(statusText.trim());
-            assertNotNull(expectedColor,
-                "Unexpected status: " + statusText + " not found in: " + statusColorMap);
-            //status color not checked
-            //final String actualColor = statusElement.getCssValue("color");
-            //assertEquals(actualColor, expectedColor,
-            //    "Unexpected color for status in process instances block: " + statusText);
-            status = statusText;
-            isCheckBoxTicked = getCheckBox().isSelected();
-        }
-
-        private List<WebElement> getSummaryCols() {
-            return oneInstanceSummary.findElements(By.tagName("td"));
-        }
-
-        private WebElement getCheckBox() {
-            return getSummaryCols().get(0).findElement(By.tagName("input"));
-        }
-
-        private WebElement getNominalTimeButton() {
-            return getSummaryCols().get(1);
-        }
-
-        public String getStartTime() {
-            return startTime;
-        }
-
-        public String getEndTime() {
-            return endTime;
-        }
-
-        public String getStatus() {
-            return status;
-        }
-        public String getNominalTime() {
-            return nominalTime;
-        }
-
-        public boolean isCheckBoxSelected() {
-            return isCheckBoxTicked;
-        }
-
-        /**
-         * Click the checkbox corresponding to this result. It is the responsibility of the
-         * client to make sure that the web element for the instance is displayed and valid.
-         */
-        public void clickCheckBox() {
-            getCheckBox().click();
-            // Toggling of checkbox should change its internal state
-            // Note that we can't expect the web element to be displayed & valid at the point this
-            // object is used
-            isCheckBoxTicked = !isCheckBoxTicked;
-        }
-
-        @Override
-        public String toString() {
-            return "OneInstanceSummary{"
-                + "checkBox=" + isCheckBoxSelected()
-                + ", nominalTime=" + getNominalTime()
-                + ", startTime=" + getStartTime()
-                + ", endTime=" + getEndTime()
-                + ", status=" + getStatus()
-                + "}";
-        }
-    }
-}

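For context, InstanceSummary.getSummary() above walks every pagination link and re-locates the table rows on each pass so that clicking a page never leaves the test holding stale WebElement references. The following is a minimal standalone sketch of that pattern; the class name and locators ("pagination", "//tbody/tr") are illustrative assumptions, not part of the deleted page object.

import java.util.ArrayList;
import java.util.List;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;

/** Sketch only: the re-locate-per-page pattern used by InstanceSummary.getSummary(). */
public final class PaginationSketch {
    private PaginationSketch() {
    }

    public static List<String> collectAllRows(WebDriver driver) {
        List<String> rows = new ArrayList<>();
        // Count the pages once; the links themselves are re-found on every iteration.
        final int pageCount = driver.findElement(By.className("pagination"))
            .findElements(By.tagName("a")).size();
        for (int page = 1; page <= pageCount; ++page) {
            // Re-locate the pagination links: the previous references may be stale
            // after the last click re-rendered the table.
            final List<WebElement> pageLinks = driver.findElement(By.className("pagination"))
                .findElements(By.tagName("a"));
            pageLinks.get(page - 1).click();
            for (WebElement row : driver.findElements(By.xpath("//tbody/tr"))) {
                rows.add(row.getText());
            }
        }
        return rows;
    }
}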
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityWizardPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityWizardPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityWizardPage.java
deleted file mode 100644
index 72c03cf..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/EntityWizardPage.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.falcon.entity.v0.Entity;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.testng.Assert;
-
-/**
- * https://issues.apache.org/jira/browse/FALCON-1546.
- * Parent class for cluster, feed and process wizard pages.
- */
-public abstract class EntityWizardPage extends AbstractSearchPage {
-    @FindBy(xpath = "//i[contains(@class, 'pointer')]")
-    protected WebElement xmlPreviewPointer;
-    protected WebElement xmlPreview = null;
-
-    public EntityWizardPage(WebDriver driver) {
-        super(driver);
-    }
-
-    /**
-     * Expand/collapse xml preview.
-     * @param shouldBeExpanded should preview be expanded or collapsed.
-     */
-    public void clickXMLPreview(boolean shouldBeExpanded) {
-        if (isXmlPreviewExpanded() != shouldBeExpanded) {
-            xmlPreviewPointer.click();
-        }
-        Assert.assertEquals(isXmlPreviewExpanded(), shouldBeExpanded,
-            "Xml preview should be " + (shouldBeExpanded ? " expanded." : " collapsed."));
-    }
-
-    /**
-     * @return true if xml preview exists and is displayed, false otherwise.
-     */
-    public boolean isXmlPreviewExpanded() {
-        xmlPreview = getElementOrNull("//textarea[@ng-model='prettyXml']");
-        return xmlPreview != null && xmlPreview.isDisplayed();
-    }
-
-    public String getXMLPreview() {
-        //preview block fetches changes slower than they appear on the form
-        waitForAngularToFinish();
-        clickXMLPreview(true);
-        return xmlPreview.getAttribute("value");
-    }
-
-    public abstract Entity getEntityFromXMLPreview();
-
-    /**
-     * Pushes xml into xml preview block.
-     * @param xml entity definition
-     */
-    public void setXmlPreview(String xml) {
-        clickEditXml(true);
-        xmlPreview.clear();
-        xmlPreview.sendKeys(xml);
-        waitForAngularToFinish();
-        clickEditXml(false);
-    }
-
-    /**
-     * Clicks on editXml button.
-     */
-    public void clickEditXml(boolean shouldBeEnabled) {
-        waitForAngularToFinish();
-        clickXMLPreview(true);
-        getEditXMLButton().click();
-        String disabled = xmlPreview.getAttribute("disabled");
-        Assert.assertEquals(disabled == null, shouldBeEnabled,
-            "Xml preview should be " + (shouldBeEnabled ? "enabled" : "disabled"));
-    }
-
-    public abstract WebElement getEditXMLButton();
-}

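A test would typically drive the XML-preview helpers of EntityWizardPage through one of its concrete subclasses. Below is a hedged sketch of that round trip: the page-object calls (clickXMLPreview, getEntityFromXMLPreview, setXmlPreview) are the ones defined above, while the driver construction, the FeedMerlin setter, and the assumption that toString() renders the entity XML are illustrative only.

import org.apache.falcon.regression.Entities.FeedMerlin;
import org.apache.falcon.regression.ui.search.FeedWizardPage;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.support.PageFactory;

/** Sketch only: read the feed back from the XML preview, then push edited XML into the form. */
public final class XmlPreviewSketch {
    private XmlPreviewSketch() {
    }

    public static void main(String[] args) {
        WebDriver driver = new FirefoxDriver(); // illustrative; real tests use the configured driver
        try {
            // Assumes the driver is already on the feed creation wizard.
            FeedWizardPage page = PageFactory.initElements(driver, FeedWizardPage.class);
            page.clickXMLPreview(true);                       // expand the preview
            FeedMerlin feed = page.getEntityFromXMLPreview();  // parse the XML shown in the preview
            feed.setDescription("edited via xml preview");     // assumes the JAXB setter is available
            page.setXmlPreview(feed.toString());               // assumes toString() renders the feed XML
        } finally {
            driver.quit();
        }
    }
}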
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/FeedWizardPage.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/FeedWizardPage.java b/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/FeedWizardPage.java
deleted file mode 100644
index 3dfab38..0000000
--- a/falcon-regression/merlin/src/main/java/org/apache/falcon/regression/ui/search/FeedWizardPage.java
+++ /dev/null
@@ -1,652 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.ui.search;
-
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.util.UIAssert;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-import org.openqa.selenium.support.FindBy;
-import org.openqa.selenium.support.FindBys;
-import org.openqa.selenium.support.ui.Select;
-import org.testng.Assert;
-
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.List;
-
-/** Page object of the Feed creation page. */
-public class FeedWizardPage extends EntityWizardPage {
-
-    private static final Logger LOGGER = Logger.getLogger(FeedWizardPage.class);
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "feedForm")
-    })
-    private WebElement feedBox;
-
-    @FindBys({
-            @FindBy(className = "mainUIView"),
-            @FindBy(className = "feedForm"),
-            @FindBy(className = "nextBtn")
-    })
-    private WebElement nextButton;
-
-    @FindBys({
-        @FindBy(className = "mainUIView"),
-        @FindBy(className = "feedForm"),
-        @FindBy(className = "prevBtn")
-    })
-    private WebElement previousButton;
-
-    @FindBys({
-            @FindBy(xpath = "//button[contains(.,'add tag')]")
-    })
-    private WebElement addTagButton;
-
-    @FindBys({
-        @FindBy(xpath = "//button[contains(.,'delete')]")
-    })
-    private WebElement deleteButton;
-
-    @FindBys({
-            @FindBy(xpath = "//button[contains(.,'add property')]")
-    })
-    private WebElement addPropertyButton;
-
-    @FindBys({
-        @FindBy(xpath = "//button[contains(.,'Catalog Storage')]")
-    })
-    private WebElement catalogStorageButton;
-
-    @FindBys({
-            @FindBy(id = "feed.step5")
-    })
-    private WebElement saveFeedButton;
-
-    @FindBy(xpath = "//a[contains(.,'Cancel')]")
-    private WebElement cancelButton;
-
-    public FeedWizardPage(WebDriver driver) {
-        super(driver);
-    }
-
-    @Override
-    public void checkPage() {
-        UIAssert.assertDisplayed(feedBox, "Feed box");
-    }
-
-    private WebElement getFeedName() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.name']"));
-    }
-    private WebElement getFeedDescription() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.description']"));
-    }
-    private WebElement getFeedTagKey(int index) {
-        return feedBox.findElements(By.xpath("//input[@ng-model='tag.key']")).get(index);
-    }
-    private WebElement getFeedTagValue(int index) {
-        return feedBox.findElements(By.xpath("//input[@ng-model='tag.value']")).get(index);
-    }
-    private WebElement getFeedGroups() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.groups']"));
-    }
-    private WebElement getFeedACLOwner() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.ACL.owner']"));
-    }
-    private WebElement getFeedACLGroup() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.ACL.group']"));
-    }
-    private WebElement getFeedACLPermissions() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.ACL.permission']"));
-    }
-    private WebElement getFeedSchemaLocation() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.schema.location']"));
-    }
-    private WebElement getFeedSchemaProvider() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.schema.provider']"));
-    }
-
-    private WebElement getFeedFrequencyQuantity() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.frequency.quantity']"));
-    }
-    private Select getFeedFrequencyUnit() {
-        return new Select(feedBox.findElement(By.xpath("//select[@ng-model='feed.frequency.unit']")));
-    }
-    private WebElement getFeedLateArrivalCheckBox() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.lateArrival.active']"));
-    }
-    private WebElement getFeedLateArrivalCutOffQuantity() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.lateArrival.cutOff.quantity']"));
-    }
-    private Select getFeedLateArrivalCutOffUnit() {
-        return new Select(feedBox.findElement(By.xpath("//select[@ng-model='feed.lateArrival.cutOff.unit']")));
-    }
-    private WebElement getFeedAvailabilityFlag() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='feed.availabilityFlag']"));
-    }
-    private Select getFeedTimeZone() {
-        return new Select(feedBox.findElement(By.xpath("//time-zone-select[@ng-model='feed.timezone']/select")));
-    }
-
-    private WebElement getQueueName() {
-        return feedBox.findElement(By.xpath("//label[.='queueName']/following-sibling::div/input"));
-    }
-
-    private Select getJobPriority() {
-        return new Select(feedBox.findElement(By.xpath("//label[.='jobPriority']/following-sibling::div/select")));
-    }
-
-    private WebElement getTimeoutQuantity() {
-        return feedBox.findElement(By.xpath("//label[.='timeout']/following-sibling::div/input"));
-    }
-
-    private Select getTimeoutUnit() {
-        return new Select(feedBox.findElement(By.xpath("//label[.='timeout']/following-sibling::div/select")));
-    }
-
-    private WebElement getParallel() {
-        return feedBox.findElement(By.xpath("//label[.='parallel']/following-sibling::div/input"));
-    }
-
-    private WebElement getMaxMaps() {
-        return feedBox.findElement(By.xpath("//label[.='maxMaps']/following-sibling::div/input"));
-    }
-
-    private WebElement getMapBandwidthKB() {
-        return feedBox.findElement(
-            By.xpath("//label[.='mapBandwidthKB']/following-sibling::div/input"));
-    }
-
-    private WebElement getFeedPropertyKey(int index) {
-        return feedBox.findElements(By.xpath("//input[@ng-model='property.key']")).get(index);
-    }
-    private WebElement getFeedPropertyValue(int index) {
-        return feedBox.findElements(By.xpath(
-            "//div[@ng-repeat='property in feed.customProperties']/*/input[@ng-model='property.value']")).get(index);
-    }
-
-    private WebElement getFeedPath(int index) {
-        return feedBox.findElements(By.xpath("//input[@ng-model='location.path']")).get(index);
-    }
-
-    private WebElement getFeedCatalogTableUri() {
-        return feedBox.findElement(
-            By.xpath("//input[@ng-model='feed.storage.catalog.catalogTable.uri']"));
-    }
-
-    private Select getFeedClusterSource() {
-        return new Select(feedBox.findElement(By.id("clusterNameSelect")));
-    }
-
-    private WebElement getFeedClusterRetentionLimit() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='cluster.retention.quantity']"));
-    }
-
-    private Select getFeedClusterRetentionUnit() {
-        return new Select(feedBox.findElement(By.xpath("//select[@ng-model='cluster.retention.unit']")));
-    }
-
-    private WebElement getFeedClusterValidityStartDate() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='cluster.validity.start.date']"));
-    }
-
-    private WebElement getFeedClusterValidityHour(int index) {
-        return feedBox.findElements(By.xpath("//input[@ng-model='hours']")).get(index);
-    }
-
-    private WebElement getFeedClusterValidityMinutes(int index) {
-        return feedBox.findElements(By.xpath("//input[@ng-model='minutes']")).get(index);
-    }
-
-    private WebElement getFeedClusterValidityMeridian(int index) {
-        return feedBox.findElements(By.xpath("//td[@ng-show='showMeridian']/button")).get(index);
-    }
-
-    private WebElement getFeedClusterValidityEndDate() {
-        return feedBox.findElement(By.xpath("//input[@ng-model='cluster.validity.end.date']"));
-    }
-
-    public List<String> getFeedFrequencyUnitValues(){
-        return getDropdownValues(getFeedFrequencyUnit());
-    }
-
-    public List<String> getFeedLateArrivalCutOffUnitValues(){
-        return getDropdownValues(getFeedLateArrivalCutOffUnit());
-    }
-
-    public List<String> getFeedClusterSourceValues(){
-        return getDropdownValues(getFeedClusterSource());
-    }
-
-    public List<String> getFeedClusterRetentionUnitValues(){
-        return getDropdownValues(getFeedClusterRetentionUnit());
-    }
-
-    public List<String> getJobPriorityValues(){
-        return getDropdownValues(getJobPriority());
-    }
-
-    public List<String> getTimeoutUnitValues(){
-        return getDropdownValues(getTimeoutUnit());
-    }
-
-    public void isFeedFrequencyDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getFeedFrequencyQuantity(), "Frequency Quantity");
-        }else {
-            try{
-                getFeedFrequencyQuantity();
-                Assert.fail("Frequency Quantity found");
-            } catch (Exception ex){
-                LOGGER.info("Frequency Quantity not found");
-            }
-        }
-    }
-
-    public void isFeedDataPathDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getFeedPath(0), "Feed Data Path");
-        }else {
-            try{
-                getFeedPath(0);
-                Assert.fail("Feed Data Path found");
-            } catch (Exception ex){
-                LOGGER.info("Feed Data Path not found");
-            }
-        }
-    }
-
-    public void isFeedClusterRetentionDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getFeedClusterRetentionLimit(), "Cluster Retention Limit");
-        }else {
-            try{
-                getFeedClusterRetentionLimit();
-                Assert.fail("Cluster Retention Limit found");
-            } catch (Exception ex){
-                LOGGER.info("Cluster Retention Limit not found");
-            }
-        }
-    }
-
-    public void isSaveFeedButtonDisplayed(boolean isDisplayed) {
-        if (isDisplayed){
-            UIAssert.assertDisplayed(saveFeedButton, "Save Button");
-        }else {
-            try{
-                getSaveFeedButton();
-                Assert.fail("Save Button found");
-            } catch (Exception ex){
-                LOGGER.info("Save Button not found");
-            }
-        }
-    }
-
-    private WebElement getSaveFeedButton(){
-        return saveFeedButton;
-    }
-
-    public void clickNext(){
-        nextButton.click();
-        waitForAngularToFinish();
-    }
-
-    public void clickPrevious(){
-        previousButton.click();
-    }
-
-    public void clickCancel(){
-        cancelButton.click();
-    }
-
-    public void clickCatalogStorageButton(){
-        catalogStorageButton.click();
-        waitForAngularToFinish();
-    }
-
-    public void setFeedName(String name){
-        sendKeysSlowly(getFeedName(), name);
-    }
-    public void setFeedDescription(String description){
-        getFeedDescription().sendKeys(description);
-    }
-    public void setFeedTagKey(int index, String tagKey){
-        getFeedTagKey(index).sendKeys(tagKey);
-    }
-    public void setFeedTagValue(int index, String tagValue){
-        getFeedTagValue(index).sendKeys(tagValue);
-    }
-
-    // Tags are in the format, "first=yes","second=yes","third=yes". Need a separate method to handle this
-    public void setFeedTags(String tagsStr){
-        if (tagsStr == null){
-            return;
-        }
-        String[] tags = tagsStr.split(",");
-        for (int i=0; i < tags.length; i++){
-            String[] keyValue = tags[i].split("=");
-            setFeedTagKey(i, keyValue[0]);
-            setFeedTagValue(i, keyValue[1]);
-            if (tags.length > i+1){
-                addTagButton.click();
-            }
-        }
-    }
-
-    public void isTagsDisplayed(int index, boolean isDisplayed){
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getFeedTagKey(index), "Tag Key Index - " + index);
-            UIAssert.assertDisplayed(getFeedTagValue(index), "Tag Value Index - " + index);
-        }else{
-            try{
-                getFeedTagKey(index);
-                Assert.fail("Tag Key Index - " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Tag Key Index - " + index + " not found");
-            }
-            try{
-                getFeedTagValue(index);
-                Assert.fail("Tag Value Index - " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Tag Value Index - " + index + " not found");
-            }
-        }
-    }
-
-    public String getFeedTagKeyText(int index){
-        return getFeedTagKey(index).getAttribute("value");
-    }
-
-    public String getFeedTagValueText(int index){
-        return getFeedTagValue(index).getAttribute("value");
-    }
-
-    public String getFeedGroupsText(){
-        return getFeedGroups().getAttribute("value");
-    }
-
-    public String getFeedFrequencyQuantityText(){
-        return getFeedFrequencyQuantity().getAttribute("value");
-    }
-
-    public String getFeedLateArrivalCutOffQuantityText(){
-        return getFeedLateArrivalCutOffQuantity().getAttribute("value");
-    }
-
-    public String getFeedPathText(int index){
-        return getFeedPath(index).getAttribute("value");
-    }
-
-    public String getFeedClusterRetentionLimitText(){
-        return getFeedClusterRetentionLimit().getAttribute("value");
-    }
-
-    public String getFeedClusterRetentionUnitText(){
-        return getFeedClusterRetentionUnit().getFirstSelectedOption().getText();
-    }
-
-    public void addProperty(){
-        waitForAngularToFinish();
-        addPropertyButton.click();
-    }
-
-    public void isPropertyDisplayed(int index, boolean isDisplayed){
-        if (isDisplayed){
-            UIAssert.assertDisplayed(getFeedPropertyKey(index), "Property Key Index - " + index);
-            UIAssert.assertDisplayed(getFeedPropertyValue(index), "Property Value Index - " + index);
-        }else{
-            try{
-                getFeedPropertyKey(index);
-                Assert.fail("Property Key Index - " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Property Key Index - " + index + " not found");
-            }
-            try{
-                getFeedPropertyValue(index);
-                Assert.fail("Property Value Index - " + index + " found");
-            } catch (Exception ex){
-                LOGGER.info("Property Value Index - " + index + " not found");
-            }
-        }
-    }
-
-
-    public void deleteTagOrProperty(){
-        deleteButton.click();
-    }
-
-    public void setFeedGroups(String feedGroups){
-        getFeedGroups().sendKeys(feedGroups);
-    }
-
-    public void setFeedACLOwner(String feedACLOwner){
-        getFeedACLOwner().clear();
-        getFeedACLOwner().sendKeys(feedACLOwner);
-    }
-    public void setFeedACLGroup(String feedACLGroup){
-        getFeedACLGroup().clear();
-        getFeedACLGroup().sendKeys(feedACLGroup);
-    }
-    public void setFeedACLPermissions(String feedACLPermissions){
-        getFeedACLPermissions().clear();
-        getFeedACLPermissions().sendKeys(feedACLPermissions);
-    }
-    public void setFeedSchemaLocation(String feedSchemaLocation){
-        sendKeysSlowly(getFeedSchemaLocation(), feedSchemaLocation);
-    }
-    public void setFeedSchemaProvider(String feedSchemaProvider){
-        sendKeysSlowly(getFeedSchemaProvider(), feedSchemaProvider);
-    }
-
-    public void setFeedFrequencyQuantity(String frequencyQuantity){
-        getFeedFrequencyQuantity().sendKeys(frequencyQuantity);
-    }
-    public void setFeedFrequencyUnit(String frequencyUnit){
-        getFeedFrequencyUnit().selectByVisibleText(frequencyUnit);
-    }
-
-    public void setFeedLateArrivalCheckBox(){
-        getFeedLateArrivalCheckBox().click();
-    }
-    public void setFeedLateArrivalCutOffQuantity(int lateArrivalCutOffQuantity){
-        getFeedLateArrivalCutOffQuantity().sendKeys(Integer.toString(lateArrivalCutOffQuantity));
-    }
-    public void setFeedLateArrivalCutOffUnit(String lateArrivalCutOffUnit){
-        getFeedLateArrivalCutOffUnit().selectByVisibleText(lateArrivalCutOffUnit);
-    }
-    public void setFeedAvailabilityFlag(String availabilityFlag){
-        getFeedAvailabilityFlag().sendKeys(availabilityFlag);
-    }
-    public void setFeedTimeZone(){
-        String timeZone = "GMT+00:00";
-        getFeedTimeZone().selectByValue(timeZone);
-    }
-    public void setQueueName(String queueName){
-        getQueueName().clear();
-        getQueueName().sendKeys(queueName);
-    }
-    public void setJobPriority(String jobPriority) {
-        getJobPriority().selectByVisibleText(jobPriority);
-    }
-    public void setTimeoutQuantity(String timeoutQuantity){
-        getTimeoutQuantity().clear();
-        getTimeoutQuantity().sendKeys(timeoutQuantity);
-    }
-    public void setTimeoutUnit(String timeoutUnit) {
-        getTimeoutUnit().selectByVisibleText(timeoutUnit);
-    }
-    public void setParallel(String parallel){
-        getParallel().clear();
-        getParallel().sendKeys(parallel);
-    }
-    public void setMaxMaps(String maxMaps){
-        getMaxMaps().clear();
-        getMaxMaps().sendKeys(maxMaps);
-    }
-    public void setMapBandwidthKB(String mapBandwidthKB){
-        getMapBandwidthKB().clear();
-        getMapBandwidthKB().sendKeys(mapBandwidthKB);
-    }
-    public void setFeedPropertyKey(int index, String propertyKey){
-        getFeedPropertyKey(index).sendKeys(propertyKey);
-    }
-    public void setFeedPropertyValue(int index, String propertyValue){
-        getFeedPropertyValue(index).sendKeys(propertyValue);
-    }
-
-    public void setFeedPath(int index, String path){
-        getFeedPath(index).clear();
-        getFeedPath(index).sendKeys(path);
-    }
-
-    public void setFeedCatalogTableUri(String catalogTableUri){
-        getFeedCatalogTableUri().sendKeys(catalogTableUri);
-    }
-
-    public void setFeedClusterSource(String clusterSource){
-        getFeedClusterSource().selectByVisibleText(clusterSource);
-    }
-
-    public void setFeedClusterRetentionLimit(String clusterRetentionLimit){
-        getFeedClusterRetentionLimit().clear();
-        sendKeysSlowly(getFeedClusterRetentionLimit(), clusterRetentionLimit);
-    }
-
-    public void setFeedClusterRetentionUnit(String clusterRetentionUnit){
-        getFeedClusterRetentionUnit().selectByVisibleText(clusterRetentionUnit);
-    }
-
-    public void setFeedClusterValidityStartDate(String clusterValidityStartDate){
-        getFeedClusterValidityStartDate().clear();
-        sendKeysSlowly(getFeedClusterValidityStartDate(), clusterValidityStartDate);
-    }
-    public void setFeedClusterValidityHour(int index, String clusterValidityHour){
-        getFeedClusterValidityHour(index).clear();
-        getFeedClusterValidityHour(index).sendKeys(clusterValidityHour);
-    }
-    public void setFeedClusterValidityMinutes(int index, String clusterValidityMinutes){
-        getFeedClusterValidityMinutes(index).clear();
-        getFeedClusterValidityMinutes(index).sendKeys(clusterValidityMinutes);
-    }
-    public void setFeedClusterValidityMeridian(int index, String clusterValidityMeridian){
-        // Toggle AM PM, if clusterValidityMeridian value is not equal to AM PM Button text
-        if (!clusterValidityMeridian.equalsIgnoreCase(getFeedClusterValidityMeridian(index).getText())){
-            getFeedClusterValidityMeridian(index).click();
-        }
-    }
-    public void setFeedClusterValidityEndDate(String clusterValidityEndDate){
-        getFeedClusterValidityEndDate().clear();
-        sendKeysSlowly(getFeedClusterValidityEndDate(), clusterValidityEndDate);
-    }
-
-    // Enter feed info on Page 1 of FeedSetup Wizard
-    public void setFeedGeneralInfo(FeedMerlin feed) {
-        setFeedName(feed.getName());
-        setFeedDescription(feed.getDescription());
-        setFeedTags(feed.getTags());
-        setFeedGroups(feed.getGroups());
-        setFeedACLOwner(feed.getACL().getOwner());
-        setFeedACLGroup(feed.getACL().getGroup());
-        setFeedACLPermissions(feed.getACL().getPermission());
-        setFeedSchemaLocation(feed.getSchema().getLocation());
-        setFeedSchemaProvider(feed.getSchema().getProvider());
-        waitForAngularToFinish();
-    }
-
-    // Enter feed info on Page 2 of FeedSetup Wizard
-    public void setFeedPropertiesInfo(FeedMerlin feed){
-        setFeedFrequencyQuantity(feed.getFrequency().getFrequency());
-        setFeedFrequencyUnit(feed.getFrequency().getTimeUnit().toString());
-        setFeedLateArrivalCheckBox();
-        setFeedLateArrivalCutOffQuantity(feed.getLateArrival().getCutOff().getFrequencyAsInt());
-        setFeedLateArrivalCutOffUnit(feed.getLateArrival().getCutOff().getTimeUnit().toString());
-        setFeedAvailabilityFlag(feed.getAvailabilityFlag());
-        setFeedTimeZone();
-        setFeedPropertyKey(0, feed.getProperties().getProperties().get(0).getName());
-        setFeedPropertyValue(0, feed.getProperties().getProperties().get(0).getValue());
-        addProperty();
-        waitForAngularToFinish();
-        setFeedPropertyKey(1, feed.getProperties().getProperties().get(1).getName());
-        setFeedPropertyValue(1, feed.getProperties().getProperties().get(1).getValue());
-        waitForAngularToFinish();
-    }
-
-    // Enter feed info on Page 3 of FeedSetup Wizard
-    public void setFeedLocationInfo(FeedMerlin feed){
-        setFeedPath(0, feed.getLocations().getLocations().get(0).getPath());
-        setFeedPath(1, feed.getLocations().getLocations().get(1).getPath());
-        setFeedPath(2, feed.getLocations().getLocations().get(2).getPath());
-        waitForAngularToFinish();
-    }
-
-    // Enter feed info on Page 4 of FeedSetup Wizard
-    public void setFeedClustersInfo(FeedMerlin feed){
-        setFeedClusterSource(feed.getClusters().getClusters().get(0).getName());
-        setFeedLocationInfo(feed);
-        Date startDate = feed.getClusters().getClusters().get(0).getValidity().getStart();
-        Date endDate = feed.getClusters().getClusters().get(0).getValidity().getEnd();
-        setFeedClusterValidityStartDate(new SimpleDateFormat("MM/dd/yyyy").format(startDate));
-        setFeedClusterValidityHour(0, new SimpleDateFormat("h").format(startDate));
-        setFeedClusterValidityMinutes(0, new SimpleDateFormat("m").format(startDate));
-        setFeedClusterValidityMeridian(0, new SimpleDateFormat("a").format(startDate));
-        setFeedClusterValidityEndDate(new SimpleDateFormat("MM/dd/yyyy").format(endDate));
-        setFeedClusterValidityHour(1, new SimpleDateFormat("h").format(endDate));
-        setFeedClusterValidityMinutes(1, new SimpleDateFormat("m").format(endDate));
-        setFeedClusterValidityMeridian(1, new SimpleDateFormat("a").format(endDate));
-        /*
-        The merlin feed has a retention limit of 9000 months.
-        The UI only supports two-digit values.
-        Need to send the hardcoded value of 99,
-        instead of feed.getClusters().getClusters().get(0).getRetention().getLimit().getFrequency()
-        */
-        setFeedClusterRetentionLimit("99");
-        setFeedClusterRetentionUnit(feed.getClusters().getClusters().get(0)
-            .getRetention().getLimit().getTimeUnit().name());
-        waitForAngularToFinish();
-    }
-
-    // setFeed method runs the default feed setup wizard, entering data on each page
-    public void setFeed(FeedMerlin feed){
-        setFeedGeneralInfo(feed);
-        clickNext();
-        setFeedPropertiesInfo(feed);
-        clickNext();
-        setFeedLocationInfo(feed);
-        clickNext();
-        setFeedClustersInfo(feed);
-        clickNext();
-        saveFeedButton.click();
-        waitForAlert();
-    }
-
-    @Override
-    public FeedMerlin getEntityFromXMLPreview() {
-        return FeedMerlin.fromString(getXMLPreview());
-    }
-
-    @Override
-    public WebElement getEditXMLButton() {
-        return driver.findElement(By.id("feed.editXML"));
-    }
-
-}


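Putting the wizard together: setFeed(FeedMerlin) above walks all four pages of the feed setup wizard and saves the feed. A hedged sketch of how a regression test might invoke it is below; it assumes the driver is already logged in to the Falcon UI and that a full feed XML definition is available (the helper method and its parameters are illustrative, not an existing API).

import org.apache.falcon.regression.Entities.FeedMerlin;
import org.apache.falcon.regression.ui.search.FeedWizardPage;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.support.PageFactory;

/** Sketch only: drives the full feed-creation wizard with a FeedMerlin definition. */
public final class FeedWizardSketch {
    private FeedWizardSketch() {
    }

    public static void createFeed(WebDriver driver, String feedXml) {
        // feedXml is the full <feed> entity definition, e.g. taken from a test bundle.
        FeedMerlin feed = FeedMerlin.fromString(feedXml);
        FeedWizardPage page = PageFactory.initElements(driver, FeedWizardPage.class);
        page.checkPage();   // verify the wizard form is displayed
        page.setFeed(feed); // fills general info, properties, locations and clusters, then saves
    }
}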
[20/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/SLAAlert.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/SLAAlert.twiki b/docs/src/site/twiki/falconcli/SLAAlert.twiki
deleted file mode 100644
index e5270fa..0000000
--- a/docs/src/site/twiki/falconcli/SLAAlert.twiki
+++ /dev/null
@@ -1,49 +0,0 @@
----+++SLAAlert
-
-[[CommonCLI][Common CLI Options]]
-
-<verbatim>
-Since: 0.8
-</verbatim>
-
-This command lists all the feed instances which have missed their SLA and are still not available. If a feed instance
-missed its SLA but is now available, it will not be reported in the results. The purpose of this API is alerting, so it
-doesn't return feed instances which missed their SLA but are now available, as they don't require any action.
-
-* Currently SLA monitoring is supported only for feeds.
-
-* The end option is optional and defaults to the current time if missing.
-
-* The name option is optional; if provided, only instances of that feed will be considered.
-
-Usage:
-
-*Example 1*
-
-*$FALCON_HOME/bin/falcon entity -type feed -start 2014-09-05T00:00Z -slaAlert  -end 2016-05-03T00:00Z -colo local*
-
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T11:59Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:00Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:01Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:02Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:03Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:04Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:05Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:06Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:07Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:08Z, tags: Missed SLA Low
-
-
-Response: default/Success!
-
-Request Id: default/216978070@qtp-830047511-4 - f5a6c129-ab42-4feb-a2bf-c3baed356248
-
-*Example 2*
-
-*$FALCON_HOME/bin/falcon entity -type feed -start 2014-09-05T00:00Z -slaAlert  -end 2016-05-03T00:00Z -colo local -name in*
-
-name: in, type: FEED, cluster: local, instanceTime: 2015-09-26T06:00Z, tags: Missed SLA High
-
-Response: default/Success!
-
-Request Id: default/1580107885@qtp-830047511-7 - f16cbc51-5070-4551-ad25-28f75e5e4cf2

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/Schedule.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/Schedule.twiki b/docs/src/site/twiki/falconcli/Schedule.twiki
deleted file mode 100644
index c4422e7..0000000
--- a/docs/src/site/twiki/falconcli/Schedule.twiki
+++ /dev/null
@@ -1,22 +0,0 @@
----+++Schedule
-
-[[CommonCLI][Common CLI Options]]
-
-Once submitted, an entity can be scheduled using the schedule option. Only process and feed entities can be scheduled.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [process|feed] -name <<name>> -schedule
-
-Optional Args :
-
--skipDryRun When this argument is specified, Falcon skips the Oozie dryrun.
-
--doAs <username>
-
--properties <<key1:val1,...,keyN:valN>>. Specifying 'falcon.scheduler:native' as a property will schedule the entity on the native scheduler of Falcon. Otherwise, it will default to the engine specified in startup.properties. For details on the native scheduler, refer to [[FalconNativeScheduler][Falcon Native Scheduler]]
-
-Examples:
-
- $FALCON_HOME/bin/falcon entity  -type process -name sampleProcess -schedule
-
- $FALCON_HOME/bin/falcon entity  -type process -name sampleProcess -schedule -properties falcon.scheduler:native

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/StatusAdmin.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/StatusAdmin.twiki b/docs/src/site/twiki/falconcli/StatusAdmin.twiki
deleted file mode 100644
index dadb8e5..0000000
--- a/docs/src/site/twiki/falconcli/StatusAdmin.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Status
-
-[[CommonCLI][Common CLI Options]]
-
-Status returns the current state of Falcon (running or stopped).
-Usage:
-$FALCON_HOME/bin/falcon admin -status
-
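-Example (the status message below is illustrative and may vary with the deployment):
-
-$FALCON_HOME/bin/falcon admin -status
-Falcon server is running (on http://localhost:15000/)
-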

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/StatusEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/StatusEntity.twiki b/docs/src/site/twiki/falconcli/StatusEntity.twiki
deleted file mode 100644
index 56d16f0..0000000
--- a/docs/src/site/twiki/falconcli/StatusEntity.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Status
-
-[[CommonCLI][Common CLI Options]]
-
-Status returns the current status of the entity.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [cluster|datasource|feed|process] -name <<name>> -status
\ No newline at end of file
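-
-Example (the entity name is a hypothetical placeholder):
-$FALCON_HOME/bin/falcon entity -type process -name SampleProcess -status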

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/StatusInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/StatusInstance.twiki b/docs/src/site/twiki/falconcli/StatusInstance.twiki
deleted file mode 100644
index 047d334..0000000
--- a/docs/src/site/twiki/falconcli/StatusInstance.twiki
+++ /dev/null
@@ -1,21 +0,0 @@
----+++Status
-
-[[CommonCLI][Common CLI Options]]
-
-The status option of the CLI can be used to get the status of a single instance or of multiple instances. If an instance is not yet materialized but is within the process validity range, WAITING is returned as its state. Along with the status, the instance time is also returned, and the log location gives the Oozie workflow URL.
-If the instance is in the WAITING state, its missing dependencies are listed.
-The job URLs are populated for all actions of the user workflow and for non-succeeded actions of the main workflow, so the user need not go to the underlying scheduler to fetch job URLs when debugging an issue in the job.
-
-Example : Suppose a process has 3 instances, where one has succeeded, one is in the running state and the other is waiting; the expected output is:
-
-{"status":"SUCCEEDED","message":"getStatus is successful","instances":[{"instance":"2012-05-07T05:02Z","status":"SUCCEEDED","logFile":"http://oozie-dashboard-url"},{"instance":"2012-05-07T05:07Z","status":"RUNNING","logFile":"http://oozie-dashboard-url"}, {"instance":"2010-01-02T11:05Z","status":"WAITING"}]
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -status
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -colo <<colo>>
--filterBy <<field1:value1,field2:value2>> -lifecycle <<lifecycles>>
--orderBy field -sortOrder <<sortOrder>> -offset 0 -numResults 10
--allAttempts To get all the attempts for corresponding instances
-
-<a href="../Restapi/InstanceStatus.html"> Optional params described here.</a>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/Submit.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/Submit.twiki b/docs/src/site/twiki/falconcli/Submit.twiki
deleted file mode 100644
index f2f7a49..0000000
--- a/docs/src/site/twiki/falconcli/Submit.twiki
+++ /dev/null
@@ -1,13 +0,0 @@
----+++Submit
-
-[[CommonCLI][Common CLI Options]]
-
-The submit option is used to set up an entity definition.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -submit -type [cluster|datasource|feed|process] -file <entity-definition.xml>
-
-Example:
-$FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml
-
-Note: The url option in the above and all subsequent commands is optional. If not specified, it is picked up from the client.properties file. If the option is neither provided nor set in client.properties, the Falcon CLI will fail.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/SubmitRecipe.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/SubmitRecipe.twiki b/docs/src/site/twiki/falconcli/SubmitRecipe.twiki
deleted file mode 100644
index d14b00d..0000000
--- a/docs/src/site/twiki/falconcli/SubmitRecipe.twiki
+++ /dev/null
@@ -1,17 +0,0 @@
----+++ Submit Recipe
-
-[[CommonCLI][Common CLI Options]]
-
-Submit the specified recipe.
-
-Usage:
-$FALCON_HOME/bin/falcon recipe -name <name>
-Name of the recipe. The user should have defined <name>-template.xml and <name>.properties in the path specified by falcon.recipe.path in the client.properties file. The falcon.home path is used if falcon.recipe.path is not specified in client.properties.
-If it is not specified in client.properties and the files cannot be found at falcon.home either, the Falcon CLI will fail.
-
-Optional Args : -tool <recipeToolClassName>
-Falcon provides a base tool that recipes can override. If this option is not specified, the default RecipeTool is used.
-This option is required if the user defines their own recipe tool class.
-
-Example:
-$FALCON_HOME/bin/falcon recipe -name hdfs-replication
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/SummaryEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/SummaryEntity.twiki b/docs/src/site/twiki/falconcli/SummaryEntity.twiki
deleted file mode 100644
index 800f9fc..0000000
--- a/docs/src/site/twiki/falconcli/SummaryEntity.twiki
+++ /dev/null
@@ -1,14 +0,0 @@
----+++Summary
-
-[[CommonCLI][Common CLI Options]]
-
-A summary of the entities of a particular type on a cluster will be listed. The entity summary includes the N most recent instances of each entity.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [feed|process] -summary
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -fields <<field1,field2>>
--filterBy <<field1:value1,field2:value2>> -tags <<tagkey=tagvalue,tagkey=tagvalue>>
--orderBy <<field>> -sortOrder <<sortOrder>> -offset 0 -numResults 10 -numInstances 7
-
-<a href="../Restapi/EntitySummary.html">Optional params described here.</a>
\ No newline at end of file
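-
-Example (uses only the optional args listed above; the values are hypothetical placeholders):
-$FALCON_HOME/bin/falcon entity -type process -summary -fields status -numInstances 5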

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/SummaryInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/SummaryInstance.twiki b/docs/src/site/twiki/falconcli/SummaryInstance.twiki
deleted file mode 100644
index f7ca0b4..0000000
--- a/docs/src/site/twiki/falconcli/SummaryInstance.twiki
+++ /dev/null
@@ -1,20 +0,0 @@
----+++Summary
-
-[[CommonCLI][Common CLI Options]]
-
-The summary option of the CLI can be used to get the consolidated status of the instances within the specified time period.
-Each status, along with the corresponding instance count, is listed for each of the applicable colos.
-Unscheduled instances within the specified time period are included as UNSCHEDULED in the output to provide more clarity.
-
-Example : Suppose a process has 3 instances, where one has succeeded, one is in the running state and the other is waiting; the expected output is:
-
-{"status":"SUCCEEDED","message":"getSummary is successful", instancesSummary:[{"cluster": <<name>> "map":[{"SUCCEEDED":"1"}, {"WAITING":"1"}, {"RUNNING":"1"}]}]}
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -summary
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -colo <<colo>>
--filterBy <<field1:value1,field2:value2>> -lifecycle <<lifecycles>>
--orderBy field -sortOrder <<sortOrder>>
-
-<a href="../Restapi/InstanceSummary.html">Optional params described here.</a>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/SuspendEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/SuspendEntity.twiki b/docs/src/site/twiki/falconcli/SuspendEntity.twiki
deleted file mode 100644
index 7618e9c..0000000
--- a/docs/src/site/twiki/falconcli/SuspendEntity.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Suspend
-
-[[CommonCLI][Common CLI Options]]
-
-Suspend on an entity results in suspension of the Oozie bundle that was scheduled earlier through the schedule function. No further instances are executed on a suspended entity. Only schedulable entities (process/feed) can be suspended.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -suspend
\ No newline at end of file
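-
-Example (the process name is a hypothetical placeholder):
-$FALCON_HOME/bin/falcon entity -type process -name SampleProcess -suspend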

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/SuspendInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/SuspendInstance.twiki b/docs/src/site/twiki/falconcli/SuspendInstance.twiki
deleted file mode 100644
index 221cf5c..0000000
--- a/docs/src/site/twiki/falconcli/SuspendInstance.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Suspend
-
-[[CommonCLI][Common CLI Options]]
-
-Suspend is used to suspend an instance or instances of the given process. This option pauses the parent workflow in the state it was in at the time this command was executed.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -suspend -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
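-
-Example (the process name and time range are hypothetical placeholders):
-$FALCON_HOME/bin/falcon instance -type process -name SampleProcess -suspend -start "2012-05-07T05:00Z" -end "2012-05-07T06:00Z"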

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/Touch.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/Touch.twiki b/docs/src/site/twiki/falconcli/Touch.twiki
deleted file mode 100644
index afbd848..0000000
--- a/docs/src/site/twiki/falconcli/Touch.twiki
+++ /dev/null
@@ -1,10 +0,0 @@
----+++Touch
-
-[[CommonCLI][Common CLI Options]]
-
-Force Update operation allows an already submitted/scheduled entity to be updated.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -touch
-
-Optional Arg : -skipDryRun. When this argument is specified, Falcon skips oozie dryrun.
\ No newline at end of file
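-
-Example (the process name is a hypothetical placeholder):
-$FALCON_HOME/bin/falcon entity -type process -name SampleProcess -touch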

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/TriageInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/TriageInstance.twiki b/docs/src/site/twiki/falconcli/TriageInstance.twiki
deleted file mode 100644
index c2c32cd..0000000
--- a/docs/src/site/twiki/falconcli/TriageInstance.twiki
+++ /dev/null
@@ -1,9 +0,0 @@
----+++Triage
-
-[[CommonCLI][Common CLI Options]]
-
-Given a feed/process instance, this command traces its ancestors to find which of them have failed. It is useful when
-a lot of instances are failing in a pipeline, as it then finds the root cause of the pipeline being stuck.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -triage -type <<feed/process>> -name <<name>> -start "yyyy-MM-dd'T'HH:mm'Z'"
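-
-Example (the process name and instance time are hypothetical placeholders):
-$FALCON_HOME/bin/falcon instance -triage -type process -name SampleProcess -start "2012-05-07T05:00Z"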

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/UpdateEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/UpdateEntity.twiki b/docs/src/site/twiki/falconcli/UpdateEntity.twiki
deleted file mode 100644
index ae60559..0000000
--- a/docs/src/site/twiki/falconcli/UpdateEntity.twiki
+++ /dev/null
@@ -1,14 +0,0 @@
----+++Update
-
-[[CommonCLI][Common CLI Options]]
-
-Update operation allows an already submitted/scheduled entity to be updated. Cluster and datasource updates are
-currently not allowed.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -update -file <<path_to_file>>
-
-Optional Arg : -skipDryRun. When this argument is specified, Falcon skips oozie dryrun.
-
-Example:
-$FALCON_HOME/bin/falcon entity -type process -name hourly-reports-generator -update -file /process/definition.xml

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/VersionAdmin.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/VersionAdmin.twiki b/docs/src/site/twiki/falconcli/VersionAdmin.twiki
deleted file mode 100644
index 453f6a1..0000000
--- a/docs/src/site/twiki/falconcli/VersionAdmin.twiki
+++ /dev/null
@@ -1,7 +0,0 @@
----+++Version
-
-[[CommonCLI][Common CLI Options]]
-
-Version returns the current version of Falcon installed.
-Usage:
-$FALCON_HOME/bin/falcon admin -version
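-
-Example (illustrative; the reported version and mode depend on the installed build):
-$FALCON_HOME/bin/falcon admin -version
-
-The output contains the build version and the server mode, mirroring the fields returned by the /api/admin/version REST call shown later in this document.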

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/VertexEdgesMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/VertexEdgesMetadata.twiki b/docs/src/site/twiki/falconcli/VertexEdgesMetadata.twiki
deleted file mode 100644
index e9182fc..0000000
--- a/docs/src/site/twiki/falconcli/VertexEdgesMetadata.twiki
+++ /dev/null
@@ -1,12 +0,0 @@
----+++ Vertex Edges
-
-[[CommonCLI][Common CLI Options]]
-
-Get the adjacent vertices or edges of the vertex with the specified direction.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -edges -id <<vertex-id>> -direction <<direction>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -edges -id 4 -direction both
-$FALCON_HOME/bin/falcon metadata -edges -id 4 -direction inE

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/VertexMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/VertexMetadata.twiki b/docs/src/site/twiki/falconcli/VertexMetadata.twiki
deleted file mode 100644
index b2c62e8..0000000
--- a/docs/src/site/twiki/falconcli/VertexMetadata.twiki
+++ /dev/null
@@ -1,11 +0,0 @@
----+++ Vertex
-
-[[CommonCLI][Common CLI Options]]
-
-Get the vertex with the specified id.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -vertex -id <<id>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -vertex -id 4

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/VerticesMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/VerticesMetadata.twiki b/docs/src/site/twiki/falconcli/VerticesMetadata.twiki
deleted file mode 100644
index 1b32ad5..0000000
--- a/docs/src/site/twiki/falconcli/VerticesMetadata.twiki
+++ /dev/null
@@ -1,11 +0,0 @@
----+++ Vertices
-
-[[CommonCLI][Common CLI Options]]
-
-Get all vertices for a key index given the specified value.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -vertices -key <<key>> -value <<value>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -vertices -key type -value feed-instance

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/index.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/index.twiki b/docs/src/site/twiki/index.twiki
deleted file mode 100644
index a1ee0a3..0000000
--- a/docs/src/site/twiki/index.twiki
+++ /dev/null
@@ -1,43 +0,0 @@
----+ Falcon - Feed management and data processing platform
-
-Falcon is a feed processing and feed management system aimed at making it
-easier for end consumers to onboard their feed processing and feed
-management on Hadoop clusters.
-
----++ Why?
-
-   * Establishes relationship between various data and processing elements on a Hadoop environment
-
-   * Feed management services such as feed retention, replications across clusters, archival etc.
-
-   * Easy to onboard new workflows/pipelines, with support for late data handling, retry policies
-
-   * Integration with metastore/catalog such as Hive/HCatalog
-
-   * Provide notification to end customer based on availability of feed groups
-     (logical group of related feeds, which are likely to be used together)
-
-   * Enables use cases for local processing in colo and global aggregations
-
-   * Captures Lineage information for feeds and processes
-
----+ Getting Started
-
-Start with these simple steps to install a Falcon instance [[InstallationSteps][Simple setup]]. Also refer
-to Falcon architecture and documentation in [[FalconDocumentation][Documentation]]. [[OnBoarding][On boarding]]
-describes steps to on-board a pipeline to Falcon. It also gives a sample pipeline for reference.
-[[EntitySpecification][Entity Specification]] gives complete details of all Falcon entities.
-
-[[falconcli/FalconCLI][Falcon CLI]] implements [[restapi/ResourceList][Falcon's RESTful API]] and
-describes various options for the command line utility provided by Falcon.
-
-Falcon provides OOTB [[HiveIntegration][lifecycle management for Tables in Hive (HCatalog)]]
-such as table replication for BCP and table eviction. Falcon also enforces
-[[Security][Security]] on protected resources and enables SSL.
-
-#LicenseInfo
----+ Licensing Information
-
-Falcon is distributed under [[http://www.apache.org/licenses/LICENSE-2.0][Apache License 2.0]].
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/AdjacentVertices.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/AdjacentVertices.twiki b/docs/src/site/twiki/restapi/AdjacentVertices.twiki
deleted file mode 100644
index 1e60866..0000000
--- a/docs/src/site/twiki/restapi/AdjacentVertices.twiki
+++ /dev/null
@@ -1,91 +0,0 @@
----++  GET api/metadata/lineage/vertices/:id/:direction
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get a list of adjacent vertices or edges with a direction.
-
----++ Parameters
-   * :id is the id of the vertex.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-   * :direction is the direction associated with the edges.
-
-   To get the adjacent out vertices of a vertex, pass the direction as out; pass in to get the adjacent in vertices,
-   and both to get both the in and out adjacent vertices. Similarly, pass outE to get the out edges of a vertex,
-   inE to get its in edges, and bothE to get both the in and out edges of the vertex.
-
-      * out  : get the adjacent out vertices of vertex
-      * in   : get the adjacent in vertices of vertex
-      * both : get the both adjacent in and out vertices of vertex
-      * outCount  : get the number of out vertices of vertex
-      * inCount   : get the number of in vertices of vertex
-      * bothCount : get the number of adjacent in and out vertices of vertex
-      * outIds  : get the identifiers of out vertices of vertex
-      * inIds   : get the identifiers of in vertices of vertex
-      * bothIds : get the identifiers of adjacent in and out vertices of vertex
-
----++ Results
-Adjacent vertices of the vertex for the specified direction.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices/4/out
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results": [
-        {
-            "timestamp":"2014-04-21T20:55Z",
-            "name":"sampleFeed",
-            "type":"feed-instance",
-            "_id":8,
-            "_type":"vertex"
-        }
-    ],
-    "totalSize":1}
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices/4/bothE
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results":[
-        {
-            "_id":"Q5V-4-5g",
-            "_type":"edge",
-            "_outV":4,
-            "_inV":8,
-            "_label":"output"
-        }
-    ],
-    "totalSize":1
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices/4/bothE?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results":[
-        {
-            "_id":"Q5V-4-5g",
-            "_type":"edge",
-            "_outV":4,
-            "_inV":8,
-            "_label":"output"
-        }
-    ],
-    "totalSize":1
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/AdminConfig.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/AdminConfig.twiki b/docs/src/site/twiki/restapi/AdminConfig.twiki
deleted file mode 100644
index 675b19e..0000000
--- a/docs/src/site/twiki/restapi/AdminConfig.twiki
+++ /dev/null
@@ -1,35 +0,0 @@
----++  GET /api/admin/config/:config-type
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get configuration information of the falcon server.
-
----++ Parameters
-   * :config-type can be build, deploy, startup or runtime
-
----++ Results
-Configuration information of the server.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/admin/config/deploy
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "properties": [
-        {
-            "value": "embedded",
-            "key": "deploy.mode"
-        },
-        {
-            "value": "all",
-            "key": "domain"
-        }
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/AdminStack.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/AdminStack.twiki b/docs/src/site/twiki/restapi/AdminStack.twiki
deleted file mode 100644
index 08903a2..0000000
--- a/docs/src/site/twiki/restapi/AdminStack.twiki
+++ /dev/null
@@ -1,40 +0,0 @@
----++  GET /api/admin/stack
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get stack trace of the falcon server.
-
----++ Parameters
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Stack trace of the server.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/admin/stack?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-Reference Handler
-State: WAITING
-java.lang.Object.wait(Native Method)
-java.lang.Object.wait(Object.java:485)
-java.lang.ref.Reference$ReferenceHandler.run(Reference.java:116)Finalizer
-
-...
-
-State: TIMED_WAITING
-sun.misc.Unsafe.park(Native Method)
-java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:196)
-java.util.concurrent.SynchronousQueue$TransferStack.awaitFulfill(SynchronousQueue.java:424)
-java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:323)
-java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:874)
-java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:955)
-java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:917)
-java.lang.Thread.run(Thread.java:695)
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/AdminVersion.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/AdminVersion.twiki b/docs/src/site/twiki/restapi/AdminVersion.twiki
deleted file mode 100644
index 7db2d8f..0000000
--- a/docs/src/site/twiki/restapi/AdminVersion.twiki
+++ /dev/null
@@ -1,35 +0,0 @@
----++  GET /api/admin/version
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get version of the falcon server.
-
----++ Parameters
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Version of the server.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/admin/version?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "properties":[
-        {
-            "key":"Version",
-            "value":"0.4-incubating-SNAPSHOT-rb47788d1112fcf949c22a3860934167237b395b0"
-        },
-        {
-            "key":"Mode",
-            "value":"embedded"
-        }
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/AllEdges.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/AllEdges.twiki b/docs/src/site/twiki/restapi/AllEdges.twiki
deleted file mode 100644
index 303ac50..0000000
--- a/docs/src/site/twiki/restapi/AllEdges.twiki
+++ /dev/null
@@ -1,42 +0,0 @@
----++  GET api/metadata/lineage/edges/all
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get all edges.
-
----++ Parameters
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-All edges in lineage graph.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/edges/all?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results": [
-        {
-            "_id":"Q5V-4-5g",
-            "_type":"edge",
-            "_outV":4,
-            "_inV":8,
-            "_label":"output"
-        },
-        {
-            "_id":"Q6t-c-5g",
-            "_type":"edge",
-            "_outV":12,
-            "_inV":16,
-            "_label":"output"
-        }
-    ],
-    "totalSize": 2
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/AllVertices.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/AllVertices.twiki b/docs/src/site/twiki/restapi/AllVertices.twiki
deleted file mode 100644
index d2beb48..0000000
--- a/docs/src/site/twiki/restapi/AllVertices.twiki
+++ /dev/null
@@ -1,43 +0,0 @@
----++  GET api/metadata/lineage/vertices/all
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get all vertices.
-
----++ Parameters
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-All vertices in lineage graph.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/vertices/all?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results": [
-        {
-            "timestamp":"2014-04-21T20:55Z",
-            "name":"sampleIngestProcess\/2014-03-01T10:00Z",
-            "type":"process-instance",
-            "version":"2.0.0",
-            "_id":4,
-            "_type":"vertex"
-        },
-        {
-            "timestamp":"2014-04-21T20:55Z",
-            "name":"rawEmailFeed\/2014-03-01T10:00Z",
-            "type":"feed-instance",
-            "_id":8,
-            "_type":"vertex"
-        }
-    ],
-    "totalSize": 2
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/Edge.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/Edge.twiki b/docs/src/site/twiki/restapi/Edge.twiki
deleted file mode 100644
index 7c4dbe5..0000000
--- a/docs/src/site/twiki/restapi/Edge.twiki
+++ /dev/null
@@ -1,34 +0,0 @@
----++  GET api/metadata/lineage/edges/:id
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Gets the edge with the specified id.
-
----++ Parameters
-   * :id is the unique id of the edge.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Edge with the specified id.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/edges/Q6t-c-5g?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "results":
-        {
-            "_id":"Q6t-c-5g",
-            "_type":"edge",
-            "_outV":12,
-            "_inV":16,
-            "_label":"output"
-        }
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityDefinition.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityDefinition.twiki b/docs/src/site/twiki/restapi/EntityDefinition.twiki
deleted file mode 100644
index 5e1165b..0000000
--- a/docs/src/site/twiki/restapi/EntityDefinition.twiki
+++ /dev/null
@@ -1,53 +0,0 @@
----++  GET /api/entities/definition/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get definition of the entity.
-
----++ Parameters
-   * :entity-type can be cluster, feed or process.
-   * :entity-name is name of the entity.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Definition of the entity.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/definition/process/SampleProcess?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<process name="SampleProcess" xmlns="uri:falcon:process:0.1">
-  <clusters>
-    <cluster name="primary-cluster">
-      <validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z"/>
-    </cluster>
-  </clusters>
-  <parallel>1</parallel>
-  <order>FIFO</order>
-  <frequency>hours(1)</frequency>
-  <timezone>UTC</timezone>
-  <inputs>
-    <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)"/>
-  </inputs>
-  <outputs>
-    <output name="output" feed="SampleOutput" instance="yesterday(0,0)"/>
-  </outputs>
-  <properties>
-    <property name="queueName" value="default"/>
-    <property name="ssh.host" value="localhost"/>
-    <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}"/>
-  </properties>
-  <workflow engine="oozie" path="/examples/apps/aggregator"/>
-  <retry policy="exp-backoff" delay="minutes(5)" attempts="3"/>
-  <late-process policy="exp-backoff" delay="hours(1)">
-    <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput"/>
-  </late-process>
-</process>
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityDelete.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityDelete.twiki b/docs/src/site/twiki/restapi/EntityDelete.twiki
deleted file mode 100644
index a488943..0000000
--- a/docs/src/site/twiki/restapi/EntityDelete.twiki
+++ /dev/null
@@ -1,31 +0,0 @@
----++  DELETE /api/entities/delete/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Delete the specified entity.
-
----++ Parameters
-   * :entity-type can be feed or process.
-   * :entity-name is name of the feed or process.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Results of the delete operation.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-DELETE http://localhost:15000/api/entities/delete/cluster/SampleProcess?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "falcon\/17ff6ca6-1c8a-459f-9ba8-8fec480e384a\n",
-    "message": "falcon\/SampleProcess(cluster) removed successfully\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityDependencies.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityDependencies.twiki b/docs/src/site/twiki/restapi/EntityDependencies.twiki
deleted file mode 100644
index 864b084..0000000
--- a/docs/src/site/twiki/restapi/EntityDependencies.twiki
+++ /dev/null
@@ -1,43 +0,0 @@
----++  GET /api/entities/dependencies/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get dependencies of the entity.
-
----++ Parameters
-   * :entity-type can be cluster, feed or process.
-   * :entity-name is name of the entity.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Dependencies of the entity.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/dependencies/process/SampleProcess?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "entity": [
-        {
-            "name": "SampleInput",
-            "type": "feed",
-            "tag": [Input]
-        },
-        {
-            "name": "SampleOutput",
-            "type": "feed"
-            "tag": [Output]
-        },
-        {
-            "name": "primary-cluster",
-            "type": "cluster"
-        }
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityLineage.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityLineage.twiki b/docs/src/site/twiki/restapi/EntityLineage.twiki
deleted file mode 100644
index f2258f2..0000000
--- a/docs/src/site/twiki/restapi/EntityLineage.twiki
+++ /dev/null
@@ -1,40 +0,0 @@
----++  GET api/metadata/lineage/entities?pipeline=:pipeline
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-It returns the graph depicting the relationship between the various processes and feeds in a given pipeline.
-
----++ Parameters
-   * :pipeline is the name of the pipeline
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-It returns a JSON graph.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/entities?pipeline=my-pipeline&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "vertices": ["my-minutely-process", "my-hourly-process"],
-    "edges":
-    [
-        {
-         "from"  : "my-minutely-process",
-         "to"    : "my-hourly-process",
-         "label" : "my-minutely-feed"
-        },
-        {
-         "from"  : "my-hourly-process",
-         "to"    : "my-minutely-process",
-         "label" : "my-hourly-feedback"
-        }
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityList.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityList.twiki b/docs/src/site/twiki/restapi/EntityList.twiki
deleted file mode 100644
index 2c2a734..0000000
--- a/docs/src/site/twiki/restapi/EntityList.twiki
+++ /dev/null
@@ -1,164 +0,0 @@
----++  GET /api/entities/list/:entity-type?fields=:fields
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get list of the entities.
-
----++ Parameters
-   * :entity-type Comma-separated entity types. Can be empty. Valid entity types are cluster, feed or process.
-   * fields <optional param> Fields of entity that the user wants to view, separated by commas.
-      * Valid options are STATUS, TAGS, PIPELINES, CLUSTERS.
-   * nameseq <optional param> Subsequence of entity name. Not case sensitive.
-      * The entity name needs to contain all the characters in the subsequence in the same order.
-      * Example 1: "sample1" will match the entity named "SampleFeed1-2".
-      * Example 2: "mhs" will match the entity named "New-My-Hourly-Summary".
-   * tagkeys <optional param> Keywords in tags, separated by comma. Not case sensitive.
-      * The returned entities will have tags that match all the tag keywords.
-   * filterBy <optional param> Filter results by list of field:value pairs. Example: filterBy=STATUS:RUNNING,PIPELINES:clickLogs
-      * Supported filter fields are NAME, STATUS, PIPELINES, CLUSTER.
-      * Query will do an AND among filterBy fields.
-   * tags <optional param> Return list of entities that have specified tags, separated by a comma. Query will do AND on tag values.
-      * Example: tags=consumer=consumer@xyz.com,owner=producer@xyz.com
-   * orderBy <optional param> Field by which results should be ordered.
-      * Supports ordering by "name".
-   * sortOrder <optional param> Valid options are "asc" and "desc"
-   * offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-   * numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0 are valid, Default is 10.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-   * Note:
-      * We have two filtering parameters for entity tags: "tags" and "tagkeys". "tags" does the exact match in key=value fashion, while "tagkeys" finds all the entities with the given key as a substring in the tags. This "tagkeys" filter is introduced for the user who doesn't remember the exact tag but some keywords in the tag. It also helps users to save the time of typing long tags.
-      * The returned entities will match all the filtering criteria.
-
----++ Results
-Total number of results and a list of entities.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/list/feed
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "totalResults":"2”,
-    "entity": [
-        {
-            "name": "SampleOutput",
-            "type": "feed"
-        },
-        {
-            "name": "SampleInput",
-            "type": "feed"
-        }
-    ]
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/list
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "totalResults":"4”,
-    "entity": [
-        {
-            "name"  : "SampleCluster1",
-            "type"  : "cluster"
-        },
-        {
-            "name"  : "SampleOutput",
-            "type"  : "feed"
-        },
-        {
-            "name"  : "SampleInput",
-            "type"  : "feed"
-        },
-        {
-            "name"  : "SampleProcess1",
-            "type"  : "process"
-        }
-    ]
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/list/feed?fields=status
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "totalResults":"2”,
-    "entity": [
-        {
-            "name"  : "SampleOutput",
-            "type"  : "feed",
-            "status": "RUNNING"
-        },
-        {
-            "name": "SampleInput",
-            "type": "feed",
-            "status": "RUNNING"
-        }
-    ]
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/list/process?filterBy=STATUS:RUNNING,PIPELINES:dataReplication&fields=status,pipelines,tags&tags=consumer=consumer@xyz.com&orderBy=name&offset=2&numResults=2
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "totalResults":"10”,
-    "entity": [
-        {
-            "name"  : "SampleProcess1",
-            "type"  : "process",
-            "status": "RUNNING",
-            "pipelines": "dataReplication",
-            "tags": "consumer=consumer@xyz.com"
-        },
-        {
-            "name": "SampleProcess3",
-            "type": "process",
-            "status": "RUNNING",
-            "pipelines": "dataReplication",
-            "tags": "consumer=consumer@xyz.com,owner=producer@xyz.com"
-        }
-    ]
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/list/feed,process?nameseq=samplebill&tagkeys=billing,healthcare&numResults=2&offset=1&fields=status,clusters,tags&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "totalResults":"4”,
-    "entity”:[
-        {
-            "type":"FEED”,
-            "name":"SampleUSHealthBill”,
-            "status":"SUBMITTED”,
-            "tags”: {"tag":["related=ushealthcare","department=billingDepartment"]},
-            "clusters": {"cluster":["SampleCluster1","primaryCluster”]}
-        },
-        {
-            "type":"PROCESS”,
-            "name":"SampleHealthBill”,
-            "status":"SUBMITTED”,
-            "tags”: {"tag":["related=healthcare","department=billingDepartment"]},
-            "clusters": {"cluster":"primaryCluster”}
-        }
-    ]
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityResume.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityResume.twiki b/docs/src/site/twiki/restapi/EntityResume.twiki
deleted file mode 100644
index d0bbe41..0000000
--- a/docs/src/site/twiki/restapi/EntityResume.twiki
+++ /dev/null
@@ -1,30 +0,0 @@
----++  POST /api/entities/resume/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Resume a suspended entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the resume command.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/resume/process/SampleProcess?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "default\/106582a9-130f-4903-8b8f-f95d7b286c30\n",
-    "message": "default\/SampleProcess(process) resumed successfully\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntitySchedule.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntitySchedule.twiki b/docs/src/site/twiki/restapi/EntitySchedule.twiki
deleted file mode 100644
index 0dede9b..0000000
--- a/docs/src/site/twiki/restapi/EntitySchedule.twiki
+++ /dev/null
@@ -1,100 +0,0 @@
----++  POST /api/entities/schedule/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Schedule an entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * skipDryRun : Optional query param, Falcon skips oozie dryrun when value is set to true.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-   * properties <key1:val1,...,keyN:valN> : Optional query param, supplies a set of key-value pairs that will be available to the entity in the coordinator configuration. These values will not override properties with the same name predefined in the entity specification. For example, to change the scheduler used for scheduling the entity you would set the property _falcon.scheduler_ in the properties parameter to _native_ to use the Falcon Scheduler or to _oozie_ to use the Oozie Scheduler.
-
-
----++ Results
-Result of the schedule command.
-
----++ Examples
----+++ Oozie Workflow
-<verbatim>
-<workflow-app xmlns="uri:oozie:workflow:0.4" name="aggregator-wf">
-  <start to="aggregator" />
-  <action name="aggregator">
-    <java>
-      <job-tracker>${jobTracker}</job-tracker>
-      <name-node>${nameNode}</name-node>
-      <configuration>
-        <property>
-          <name>mapred.job.queue.name</name>
-          <value>${queueName}</value>
-        </property>
-      </configuration>
-      <main-class>com.company.hadoop.AggregatorJob</main-class>
-      <java-opts>-Dframework.instrumentation.host=${instrumentationServer}</java-opts>
-      <arg>--input.path=${inputBasePath}</arg>
-      <arg>--output.path=${outputBasePath}</arg>
-    </java>
-    <ok to="end" />
-    <error to="fail" />
-  </action>
-  <kill name="fail">
-    <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-  </kill>
-</workflow-app>
-</verbatim>
----+++ Submitted Process
-<verbatim>
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday -->
-<process xmlns="uri:falcon:process:0.1" name="SampleProcess" >
-    <clusters>
-      <cluster name="primary-cluster">
-        <validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z" />
-      </cluster>
-    </clusters>
-
-    <parallel>1</parallel>
-    <order>FIFO</order>
-    <frequency>hours(1)</frequency>
-
-    <inputs>
-        <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)" />
-    </inputs>
-
-    <outputs>
-        <output name="output" feed="SampleOutput" instance="yesterday(0,0)" />
-    </outputs>
-
-    <properties>
-        <property name="queueName" value="default" />
-        <property name="ssh.host" value="localhost" />
-        <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}" />
-        <property name="instrumentationServer" value="${coord:conf('instrumentation.host')}" />
-    </properties>
-
-    <workflow engine="oozie" path="/examples/apps/aggregator" />
-    <retry policy="exp-backoff" delay="minutes(5)" attempts="3" />
-    
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput" />
-    </late-process>
-</process>
-</verbatim>
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/schedule/process/SampleProcess?skipDryRun=false&doAs=joe&properties=instrumentation.host:intrumentation.localdomain
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "default\/ee735c95-98bd-41b8-a705-2e78bcfcdcd9\n",
-    "message": "default\/SampleProcess(process) scheduled successfully\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
----+++ Notes
-In this example, the value of _framework.instrumentation.host_ in the Oozie workflow will be _intrumentation.localdomain_ which is the property passed when the process is scheduled.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityStatus.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityStatus.twiki b/docs/src/site/twiki/restapi/EntityStatus.twiki
deleted file mode 100644
index 188019d..0000000
--- a/docs/src/site/twiki/restapi/EntityStatus.twiki
+++ /dev/null
@@ -1,30 +0,0 @@
----++  GET /api/entities/status/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get status of the entity.
-
----++ Parameters
-   * :entity-type can be cluster, feed or process.
-   * :entity-name is name of the entity.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Status of the entity.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/status/process/SampleProcess?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "default\/4d35b382-852a-4bc7-9972-b9db3493322a\n",
-    "message": "default\/SUBMITTED\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntitySubmit.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntitySubmit.twiki b/docs/src/site/twiki/restapi/EntitySubmit.twiki
deleted file mode 100644
index a8dc9d7..0000000
--- a/docs/src/site/twiki/restapi/EntitySubmit.twiki
+++ /dev/null
@@ -1,105 +0,0 @@
----++ POST  api/entities/submit/:entity-type
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Submit the given entity.
-
----++ Parameters
-   * :entity-type can be cluster, feed or process.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the submission.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/submit/feed
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Hourly sample input data -->
-
-<feed description="sample input data"
-      name="SampleInput" xmlns="uri:falcon:feed:0.1"
-      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <groups>group</groups>
-    <frequency>hours(1)</frequency>
-    <late-arrival cut-off="hours(6)" />
-    <clusters>
-        <cluster name="primary-cluster" type="source">
-            <!--validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" timezone="UTC" /-->
-            <validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" />
-            <retention limit="months(24)" action="delete" />
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/bootcamp/data/${YEAR}-${MONTH}-${DAY}-${HOUR}/SampleInput" />
-        <location type="stats" path="/projects/bootcamp/stats/SampleInput" />
-        <location type="meta" path="/projects/bootcamp/meta/SampleInput" />
-    </locations>
-
-    <ACL owner="suser" group="users" permission="0755" />
-
-    <schema location="/none" provider="none" />
-</feed>
-</verbatim>
-
----+++ Result
-<verbatim>
-{
-    "requestId": "default\/d72a41f7-6420-487b-8199-62d66e492e35\n",
-    "message": "default\/Submit successful (feed) SampleInput\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/submit/process?doAs=joe
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday -->
-<process xmlns="uri:falcon:process:0.1" name="SampleProcess" >
-    <clusters>
-      <cluster name="primary-cluster">
-	<validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z" />
-      </cluster>
-    </clusters>
-
-    <parallel>1</parallel>
-    <order>FIFO</order>
-    <frequency>hours(1)</frequency>
-
-    <inputs>
-        <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)" />
-    </inputs>
-
-    <outputs>
-        <output name="output" feed="SampleOutput" instance="yesterday(0,0)" />
-    </outputs>
-
-    <properties>
-        <property name="queueName" value="default" />
-        <property name="ssh.host" value="localhost" />
-        <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}" />
-    </properties>
-
-    <workflow engine="oozie" path="/examples/apps/aggregator" />
-    <retry policy="exp-backoff" delay="minutes(5)" attempts="3" />
-    
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput" />
-    </late-process>
-</process>
-</verbatim>
-
----+++ Result
-<verbatim>
-{
-    "requestId": "default\/e5cc8230-f356-4566-9b65-536abdff8aa3\n",
-    "message": "default\/Submit successful (process) SampleProcess\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntitySubmitAndSchedule.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntitySubmitAndSchedule.twiki b/docs/src/site/twiki/restapi/EntitySubmitAndSchedule.twiki
deleted file mode 100644
index 3cc23e9..0000000
--- a/docs/src/site/twiki/restapi/EntitySubmitAndSchedule.twiki
+++ /dev/null
@@ -1,64 +0,0 @@
----++  POST /api/entities/submitAndSchedule/:entity-type
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Submits and schedules an entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * skipDryRun : Optional query param, Falcon skips oozie dryrun when value is set to true.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the submit and schedule command.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/submitAndSchedule/process?skipDryRun=false&doAs=joe
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday -->
-<process xmlns="uri:falcon:process:0.1" name="SampleProcess" >
-    <clusters>
-      <cluster name="primary-cluster">
-	<validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z" />
-      </cluster>
-    </clusters>
-
-    <parallel>1</parallel>
-    <order>FIFO</order>
-    <frequency>hours(1)</frequency>
-
-    <inputs>
-        <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)" />
-    </inputs>
-
-    <outputs>
-        <output name="output" feed="SampleOutput" instance="yesterday(0,0)" />
-    </outputs>
-
-    <properties>
-        <property name="queueName" value="default" />
-        <property name="ssh.host" value="localhost" />
-        <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}" />
-    </properties>
-
-    <workflow engine="oozie" path="/examples/apps/aggregator" />
-    <retry policy="exp-backoff" delay="minutes(5)" attempts="3" />
-    
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput" />
-    </late-process>
-</process>
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "schedule\/default\/b5b40931-175b-4b15-8f2b-02ef2e66f06b\n\nsubmit\/default\/b5b40931-175b-4b15-8f2b-02ef2e66f06b\n\n",
-    "message": "schedule\/default\/SampleProcess(process) scheduled successfully\n\nsubmit\/default\/Submit successful (process) SampleProcess\n\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntitySummary.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntitySummary.twiki b/docs/src/site/twiki/restapi/EntitySummary.twiki
deleted file mode 100644
index 763c2a7..0000000
--- a/docs/src/site/twiki/restapi/EntitySummary.twiki
+++ /dev/null
@@ -1,74 +0,0 @@
----++  GET /api/entities/summary/:entity-type
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Given an EntityType and cluster, get a list of entities along with a summary of the N most recent instances of each entity.
-
----++ Parameters
-   * :entity-type Valid options are feed or process.
-   * cluster Show entities that belong to this cluster.
-   * start <optional param> Show entity summaries from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * By default, it is set to (end - 2 days).
-   * end <optional param> Show entity summary up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * Default is set to now.
-   * fields <optional param> Fields of entity that the user wants to view, separated by commas.
-      * Valid options are STATUS, TAGS, PIPELINES.
-   * filterBy <optional param> Filter results by list of field:value pairs. Example: filterBy=STATUS:RUNNING,PIPELINES:clickLogs
-      * Supported filter fields are NAME, STATUS, PIPELINES, CLUSTER.
-      * Query will do an AND among filterBy fields.
-   * tags <optional param> Return list of entities that have specified tags, separated by a comma. Query will do AND on tag values.
-      * Example: tags=consumer=consumer@xyz.com,owner=producer@xyz.com
-   * orderBy <optional param> Field by which results should be ordered.
-      * Supports ordering by "name".
-   * sortOrder <optional param> Valid options are "asc" and "desc".
-   * offset <optional param> Show results from the offset, used for pagination. Defaults to 0.
-   * numResults <optional param> Number of results to show per request, used for pagination. Only integers > 0 are valid. Default is 10.
-   * numInstances <optional param> Number of recent instances to show per entity. Only integers > 0 are valid. Default is 7.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Show entities along with summary of N instances for each entity.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/summary/feed?cluster=primary-cluster&filterBy=STATUS:RUNNING&fields=status&tags=consumer=consumer@xyz.com&orderBy=name&offset=0&numResults=1&numInstances=2&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "entitySummary": [
-        {
-            "name"  : "SampleOutput",
-            "type"  : "feed",
-            "status": "RUNNING",
-            "instances": [
-            {
-                "details": "",
-                "endTime": "2013-10-21T14:40:26-07:00",
-                "startTime": "2013-10-21T14:39:56-07:00",
-                "cluster": "primary-cluster",
-                "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-                "status": "RUNNING",
-                "instance": "2012-04-03T07:00Z"
-            },
-            {
-                "details": "",
-                "endTime": "2013-10-21T14:42:27-07:00",
-                "startTime": "2013-10-21T14:41:57-07:00",
-                "cluster": "primary-cluster",
-                "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933397-oozie-rgau-W",
-                "status": "RUNNING",
-                "instance": "2012-04-03T08:00Z"
-            }
-            ]
-        }
-    ],
-    "requestId": "default\/e15bb378-d09f-4911-9df2-5334a45153d2\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
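
A small sketch of issuing the summary call from Java with a trimmed-down version of the query string from the example above; the host and port are the example's, and the string-level status check is only for illustration (a real client would parse the JSON).
<verbatim>
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EntitySummaryDemo {
    public static void main(String[] args) throws Exception {
        String query = "cluster=primary-cluster&filterBy=STATUS:RUNNING&fields=status"
                + "&orderBy=name&offset=0&numResults=1&numInstances=2";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:15000/api/entities/summary/feed?" + query))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Crude check for illustration; the body is the JSON document shown above.
        boolean succeeded = response.body().contains("SUCCEEDED");
        System.out.println("HTTP " + response.statusCode() + ", succeeded=" + succeeded);
    }
}
</verbatim>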

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntitySuspend.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntitySuspend.twiki b/docs/src/site/twiki/restapi/EntitySuspend.twiki
deleted file mode 100644
index b322b27..0000000
--- a/docs/src/site/twiki/restapi/EntitySuspend.twiki
+++ /dev/null
@@ -1,30 +0,0 @@
----++  POST /api/entities/suspend/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Suspend an entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Status of the entity.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/suspend/process/SampleProcess?doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "default\/fe5f2b6c-1f2e-49fc-af3a-342079f0b46b\n",
-    "message": "default\/SampleProcess(process) suspended successfully\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
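
A bare POST with no body is enough for this call; the sketch below assumes the same localhost server as the example. The touch and kill endpoints are invoked the same way, touch with its skipDryRun query parameter and kill with its start/end query parameters.
<verbatim>
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EntitySuspendDemo {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:15000/api/entities/suspend/process/SampleProcess?doAs=joe"))
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // JSON with requestId, message and status
    }
}
</verbatim>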

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityTouch.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityTouch.twiki b/docs/src/site/twiki/restapi/EntityTouch.twiki
deleted file mode 100644
index 5b58ce2..0000000
--- a/docs/src/site/twiki/restapi/EntityTouch.twiki
+++ /dev/null
@@ -1,31 +0,0 @@
----++ POST  api/entities/touch/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Forces an update of the entity.
-
----++ Parameters
-   * :entity-type can be feed or process.
-   * :entity-name is name of the feed or process.
-   * skipDryRun : Optional query param, Falcon skips oozie dryrun when value is set to true.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the touch operation.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/touch/process/SampleProcess?skipDryRun=true&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "touch\/default\/d6aaa328-6836-4818-a212-515bb43d8b86\n\n",
-    "message": "touch\/default\/SampleProcess updated successfully\n\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityUpdate.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityUpdate.twiki b/docs/src/site/twiki/restapi/EntityUpdate.twiki
deleted file mode 100644
index 46b01fc..0000000
--- a/docs/src/site/twiki/restapi/EntityUpdate.twiki
+++ /dev/null
@@ -1,66 +0,0 @@
----++ POST  api/entities/update/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Updates the submitted entity.
-
----++ Parameters
-   * :entity-type can be feed or process.
-   * :entity-name is name of the feed or process.
-   * skipDryRun : Optional query param, Falcon skips oozie dryrun when value is set to true.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the update operation.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/update/process/SampleProcess?skipDryRun=false&doAs=joe
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday -->
-<process xmlns="uri:falcon:process:0.1" name="SampleProcess" >
-    <clusters>
-      <cluster name="primary-cluster">
-	<validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z" />
-      </cluster>
-    </clusters>
-
-    <parallel>1</parallel>
-    <order>FIFO</order>
-    <frequency>hours(1)</frequency>
-
-    <inputs>
-        <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)" />
-    </inputs>
-
-    <outputs>
-        <output name="output" feed="SampleOutput" instance="yesterday(0,0)" />
-    </outputs>
-
-    <properties>
-        <property name="queueName" value="default" />
-        <property name="ssh.host" value="localhost" />
-        <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}" />
-    </properties>
-
-    <workflow engine="oozie" path="/examples/apps/aggregator" />
-    <retry policy="exp-backoff" delay="minutes(5)" attempts="3" />
-    
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput" />
-    </late-process>
-</process>
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "update\/default\/d6aaa328-6836-4818-a212-515bb43d8b86\n\n",
-    "message": "update\/default\/SampleProcess updated successfully\n\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/EntityValidate.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityValidate.twiki b/docs/src/site/twiki/restapi/EntityValidate.twiki
deleted file mode 100644
index 054b083..0000000
--- a/docs/src/site/twiki/restapi/EntityValidate.twiki
+++ /dev/null
@@ -1,170 +0,0 @@
----++ POST  api/entities/validate/:entity-type
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Validates the submitted entity.
-
----++ Parameters
-   * :entity-type can be cluster, feed or process.
-   * skipDryRun : Optional query param, Falcon skips oozie dryrun when value is set to true.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the validation.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/validate/cluster
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cluster xmlns="uri:falcon:cluster:0.1" name="primary-cluster" description="Primary Cluster" colo="west-coast">
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:50070" version="1.1.1"/>
-        <interface type="write" endpoint="hdfs://localhost:9000" version="1.1.1"/>
-        <interface type="execute" endpoint="localhost:9001" version="1.1.1"/>
-        <interface type="workflow" endpoint="http://localhost:11000/oozie/" version="4.0.0"/>
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true" version="5.4.3"/>
-    </interfaces>
-    <locations>
-        <location name="staging" path="/apps/falcon/staging"/>
-        <location name="temp" path="/tmp"/>
-        <location name="working" path="/apps/falcon/working"/>
-    </locations>
-</cluster>
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "dd3f6c3a-a6f1-4c50-97fb-3f9a3f698e10",
-    "message": "Validated successfully (CLUSTER) primary-cluster",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/validate/feed?skipDryRun=true
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Hourly sample input data -->
-
-<feed description="sample input data"
-      name="SampleInput" xmlns="uri:falcon:feed:0.1"
-      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <groups>group</groups>
-    <frequency>hours(1)</frequency>
-    <late-arrival cut-off="hours(6)" />
-    <clusters>
-        <cluster name="primary-cluster" type="source">
-            <!--validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" timezone="UTC" /-->
-            <validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" />
-            <retention limit="months(24)" action="delete" />
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/bootcamp/data/${YEAR}-${MONTH}-${DAY}-${HOUR}/SampleInput" />
-        <location type="stats" path="/projects/bootcamp/stats/SampleInput" />
-        <location type="meta" path="/projects/bootcamp/meta/SampleInput" />
-    </locations>
-
-    <ACL owner="suser" group="users" permission="0755" />
-
-    <schema location="/none" provider="none" />
-</feed>
-</verbatim>
-
----+++ Result
-<verbatim>
-{
-    "requestId": "c85b190e-e653-493a-a863-d62de9c2e3b0",
-    "message": "Validated successfully (FEED) SampleInput",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/validate/feed
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Daily sample output data -->
-
-<feed description="sample output data" name="SampleOutput" xmlns="uri:falcon:feed:0.1"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <groups>group</groups>
-    <frequency>hours(1)</frequency>
-    <late-arrival cut-off="hours(6)" />
-    <clusters>
-        <cluster name="primary-cluster" type="source">
-            <!--validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" timezone="UTC" /-->
-            <validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" />
-            <retention limit="months(24)" action="delete" />
-        </cluster>
-    </clusters>
-    <locations>
-        <location type="data" path="/projects/bootcamp/output/${YEAR}-${MONTH}-${DAY}-${HOUR}/SampleOutput" />
-        <location type="stats" path="/projects/bootcamp/stats/SampleOutput" />
-        <location type="meta" path="/projects/bootcamp/meta/SampleOutput" />
-    </locations>
-    <ACL owner="suser" group="users" permission="0755" />
-    <schema location="/none" provider="none" />
-</feed>
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "requestId": "60781732-460e-4c6c-ba86-a75fae574b05",
-    "message": "Validated successfully (FEED) SampleOutput",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/entities/validate/process?skipDryRun=false&doAs=joe
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday -->
-<process xmlns="uri:falcon:process:0.1" name="SampleProcess" >
-    <clusters>
-      <cluster name="primary-cluster">
-	<validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z" />
-      </cluster>
-    </clusters>
-
-    <parallel>1</parallel>
-    <order>FIFO</order>
-    <frequency>hours(1)</frequency>
-
-    <inputs>
-        <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)" />
-    </inputs>
-
-    <outputs>
-        <output name="output" feed="SampleOutput" instance="yesterday(0,0)" />
-    </outputs>
-
-    <properties>
-        <property name="queueName" value="default" />
-        <property name="ssh.host" value="localhost" />
-        <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}" />
-    </properties>
-
-    <workflow engine="oozie" path="/examples/apps/aggregator" />
-    <retry policy="exp-backoff" delay="minutes(5)" attempts="3" />
-    
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput" />
-    </late-process>
-</process>
-</verbatim>
-
----+++ Result
-<verbatim>
-{
-    "requestId": "e4a965c6-c7a2-41d9-ba08-2e77f1c43f57",
-    "message": "Validated successfully (PROCESS) SampleProcess",
-    "status": "SUCCEEDED"
-}
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/FeedInstanceListing.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/FeedInstanceListing.twiki b/docs/src/site/twiki/restapi/FeedInstanceListing.twiki
deleted file mode 100644
index 03f3c57..0000000
--- a/docs/src/site/twiki/restapi/FeedInstanceListing.twiki
+++ /dev/null
@@ -1,46 +0,0 @@
----++ GET /api/instance/listing/feed/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get Falcon feed instance availability.
-
----++ Parameters
-   * :entity-name Name of the entity.
-   * start <optional param> Show instances from this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * By default, it is set to (end - (10 * entityFrequency)).
-   * end <optional param> Show instances up to this date. Date format is yyyy-MM-dd'T'HH:mm'Z'.
-      * Default is set to now.
-   * colo <optional param> Colo on which the query should be run.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Feed instance availability status
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/listing/feed/SampleFeed?colo=*&start=2012-04-03T07:00Z&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "size": "450231212222",
-            "creationTime": "1236679827365",
-            "cluster": "primary-cluster",
-            "uri": "/data/SampleFeed/2012-04-03",
-            "status": "AVAILABLE",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/3527038e-8334-4e50-8173-76c4fa430d0b\n",
-    "message": "default\/STATUS\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/FeedLookup.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/FeedLookup.twiki b/docs/src/site/twiki/restapi/FeedLookup.twiki
deleted file mode 100644
index 053182b..0000000
--- a/docs/src/site/twiki/restapi/FeedLookup.twiki
+++ /dev/null
@@ -1,37 +0,0 @@
----++  GET api/entities/lookup/feed
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Given a path, get the feed(s) to which the path belongs.
-
----++ Parameters
-    * path Path of the instance for which you want to determine the feed, e.g. /data/project1/2014/10/10/23/.
-      It has to be the complete path; a partial path will not match.
-    * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Returns the name of each feed to which the given path belongs, along with the location type (meta/data/stats) and the cluster.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/lookup/feed?path=/data/project1/2014/10/10/23&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "feeds":
-    [
-        {
-           "feedName": "My-Feed1",
-           "locationType": "DATA",
-           "clusterName": "My-cluster1"
-        },
-        {
-           "feedName": "My-Feed2",
-           "locationType": "DATA",
-           "clusterName": "My-cluster2"
-        }
-    ]
-}
-</verbatim>
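
Because the path is passed as a query parameter, it should be URL-encoded before being appended; a minimal sketch using the example's host and a placeholder path:
<verbatim>
import java.net.URI;
import java.net.URLEncoder;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;

public class FeedLookupDemo {
    public static void main(String[] args) throws Exception {
        String path = "/data/project1/2014/10/10/23";
        String encoded = URLEncoder.encode(path, StandardCharsets.UTF_8);
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:15000/api/entities/lookup/feed?path=" + encoded))
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Body is a JSON document with a "feeds" array (feedName, locationType, clusterName).
        System.out.println(response.body());
    }
}
</verbatim>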

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/FeedSLA.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/FeedSLA.twiki b/docs/src/site/twiki/restapi/FeedSLA.twiki
deleted file mode 100644
index 8760976..0000000
--- a/docs/src/site/twiki/restapi/FeedSLA.twiki
+++ /dev/null
@@ -1,56 +0,0 @@
----++ GET /api/entities/sla-alert/:entity-type
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-<verbatim>
-Since: 0.8
-</verbatim>
-This command lists all the feed instances which have missed their SLA and are still not available. Instances that
-missed their SLA but have since become available are not reported, because the purpose of this API is alerting and
-such instances require no further action.
-
----++ Parameters
-   * :entity-type Only valid option is feed.
-   * entity-name <optional param> restricts results to the feed with this name.
-   * start <mandatory param> start of the time window for nominal instances, inclusive.
-   * end <optional param> end of the time window for nominal instances, inclusive. Defaults to the current time.
-   * colo <optional param> name of the colo
-
-
----++ Results
-Pending feed instances which missed SLA.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/entities/sla-alert/feed?colo=*&start=2012-04-03T07:00Z
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "status":"SUCCEEDED",
-    "message":"default/Success!\n",
-    "requestId":"default/885720178@qtp-495452957-6 - f6e82e9b-d23f-466b-82df-4fb8293ce9cf\n",
-    "instances":[
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:33:00+05:30","tags":"Missed SLA High"},
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:29:00+05:30","tags":"Missed SLA High"},
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:35:00+05:30","tags":"Missed SLA Low"},
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:30:00+05:30","tags":"Missed SLA High"},
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:34:00+05:30","tags":"Missed SLA High"},
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:31:00+05:30","tags":"Missed SLA High"},
-            {"cluster":"local","entityName":"out","entityType":"FEED","instanceTime":"2015-09-26T17:32:00+05:30","tags":"Missed SLA High"}
-    ]
-}
-</verbatim>
-
-If there are no pending instances that have missed their SLA, the response looks like this:
-<verbatim>
-{
-    "status":"SUCCEEDED",
-    "message":"default/Success!\n",
-    "requestId":"default/979808239@qtp-1243851750-3 - 8c7396c0-efe2-43e9-9aea-7ae6afea5fd6\n"
-}
-</verbatim>
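
A sketch of consuming the alert payload, for instance to page on high-severity misses. It assumes Jackson (com.fasterxml.jackson.databind) is available on the classpath, which is purely an assumption of this example; the URL and tag values come from the sample response above.
<verbatim>
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FeedSlaAlertDemo {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:15000/api/entities/sla-alert/feed?colo=*&start=2012-04-03T07:00Z"))
                .GET()
                .build();
        String body = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString())
                .body();
        JsonNode root = new ObjectMapper().readTree(body);
        // "instances" is absent when nothing has missed its SLA, so this loop simply does nothing.
        for (JsonNode instance : root.path("instances")) {
            if ("Missed SLA High".equals(instance.path("tags").asText())) {
                System.out.println(instance.path("entityName").asText()
                        + " @ " + instance.path("instanceTime").asText());
            }
        }
    }
}
</verbatim>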

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/Graph.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/Graph.twiki b/docs/src/site/twiki/restapi/Graph.twiki
deleted file mode 100644
index db58d2e..0000000
--- a/docs/src/site/twiki/restapi/Graph.twiki
+++ /dev/null
@@ -1,22 +0,0 @@
----++  GET api/metadata/lineage/serialize
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Dump the graph.
-
----++ Parameters
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Serializes the graph to the file configured via *.falcon.graph.serialize.path in the custom startup.properties.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/metadata/lineage/serialize?doAs=joe
-</verbatim>
----+++ Result
-None.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceDependencies.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceDependencies.twiki b/docs/src/site/twiki/restapi/InstanceDependencies.twiki
deleted file mode 100644
index 5641757..0000000
--- a/docs/src/site/twiki/restapi/InstanceDependencies.twiki
+++ /dev/null
@@ -1,49 +0,0 @@
----++ GET /api/instance/dependencies/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Get dependent instances for a particular instance.
-
----++ Parameters
-   * :entity-type Valid options are feed or process.
-   * :entity-name Name of the entity
-   * instanceTime <mandatory param> time of the given instance
-   * colo <optional param> name of the colo
-
-
----++ Results
-Dependent instances for the specified instance
-
----++ Examples
----+++ Rest Call
-<verbatim>
-GET http://localhost:15000/api/instance/dependencies/feed/myFeed?colo=*&instanceTime=2012-04-03T07:00Z
-</verbatim>
----+++ Result
-<verbatim>
-{
-    'status': 'SUCCEEDED',
-    'message': 'default/Success!\n',
-    'dependencies': [
-        {
-            'cluster': 'local',
-            'entityName': 'consumer-process',
-            'entityType': 'PROCESS',
-            'instanceTime': '2014-12-18T00:00Z',
-            'tags': 'Input'
-        },
-        {
-            'cluster': 'local',
-            'entityName': 'producer-process',
-            'entityType': 'PROCESS',
-            'instanceTime': '2014-12-18T00:00Z',
-            'tags': 'Output'
-        }
-    ],
-    'requestId': 'default/1405883107@qtp-1501726962-6-0c2e690f-546b-47b0-a5ee-0365d4522a31\n'
-}
-</verbatim>
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/restapi/InstanceKill.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/InstanceKill.twiki b/docs/src/site/twiki/restapi/InstanceKill.twiki
deleted file mode 100644
index eb22945..0000000
--- a/docs/src/site/twiki/restapi/InstanceKill.twiki
+++ /dev/null
@@ -1,44 +0,0 @@
----++  POST /api/instance/kill/:entity-type/:entity-name
-   * <a href="#Description">Description</a>
-   * <a href="#Parameters">Parameters</a>
-   * <a href="#Results">Results</a>
-   * <a href="#Examples">Examples</a>
-
----++ Description
-Kill currently running instance(s) of an entity.
-
----++ Parameters
-   * :entity-type can either be a feed or a process.
-   * :entity-name is name of the entity.
-   * start is the start time of the instance(s) to be killed.
-   * end is the end time of the instance(s) to be killed.
-   * lifecycle <optional param> can be Eviction/Replication(default) for feed and Execution(default) for process.
-   * doAs <optional query param> allows the current user to impersonate the user passed in doAs when interacting with the Falcon system.
-
----++ Results
-Result of the kill operation.
-
----++ Examples
----+++ Rest Call
-<verbatim>
-POST http://localhost:15000/api/instance/kill/process/SampleProcess?colo=*&start=2012-04-03T07:00Z&end=2014-04-03T07:00Z&doAs=joe
-</verbatim>
----+++ Result
-<verbatim>
-{
-    "instances": [
-        {
-            "details": "",
-            "endTime": "2013-10-21T15:26:59-07:00",
-            "startTime": "2013-10-21T15:19:57-07:00",
-            "cluster": "primary-cluster",
-            "logFile": "http:\/\/localhost:11000\/oozie?job=0000070-131021115933395-oozie-rgau-W",
-            "status": "KILLED",
-            "instance": "2012-04-03T07:00Z"
-        }
-    ],
-    "requestId": "default\/23b3cfee-ee22-40c0-825d-39c322587d5f\n",
-    "message": "default\/KILL\n",
-    "status": "SUCCEEDED"
-}
-</verbatim>


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java b/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
deleted file mode 100644
index e4d9385..0000000
--- a/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.service.ConfigurationChangeListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * An in-memory graph of entities and the relationships among them.
- */
-public final class EntityGraph implements ConfigurationChangeListener {
-
-    private static final Logger LOG = LoggerFactory.getLogger(EntityGraph.class);
-
-    private static EntityGraph instance = new EntityGraph();
-
-    private Map<Node, Set<Node>> graph = new ConcurrentHashMap<Node, Set<Node>>();
-
-    private EntityGraph() {
-    }
-
-    public static EntityGraph get() {
-        return instance;
-    }
-
-    public Set<Entity> getDependents(Entity entity) throws FalconException {
-        Node entityNode = new Node(entity.getEntityType(), entity.getName());
-        if (graph.containsKey(entityNode)) {
-            ConfigurationStore store = ConfigurationStore.get();
-            Set<Entity> dependents = new HashSet<Entity>();
-            for (Node node : graph.get(entityNode)) {
-                Entity dependentEntity = store.get(node.type, node.name);
-                if (dependentEntity != null) {
-                    dependents.add(dependentEntity);
-                } else {
-                    LOG.error("Dependent entity {} was not found in configuration store.", node);
-                }
-            }
-            return dependents;
-        } else {
-            return null;
-        }
-    }
-
-    @Override
-    public void onAdd(Entity entity) throws FalconException {
-        Map<Node, Set<Node>> nodeEdges = null;
-        switch (entity.getEntityType()) {
-        case PROCESS:
-            nodeEdges = getEdgesFor((Process) entity);
-            break;
-        case FEED:
-            nodeEdges = getEdgesFor((Feed) entity);
-            break;
-        default:
-        }
-        if (nodeEdges == null) {
-            return;
-        }
-        LOG.debug("Adding edges for {}: {}", entity.getName(), nodeEdges);
-
-        for (Map.Entry<Node, Set<Node>> entry : nodeEdges.entrySet()) {
-            LOG.debug("Adding edges : {} for {}", entry.getValue(), entry.getKey());
-            if (graph.containsKey(entry.getKey())) {
-                graph.get(entry.getKey()).addAll(entry.getValue());
-            } else {
-                graph.put(entry.getKey(), entry.getValue());
-            }
-        }
-        LOG.debug("Merged edges to graph {}", entity.getName());
-    }
-
-    @Override
-    public void onRemove(Entity entity) throws FalconException {
-        Map<Node, Set<Node>> nodeEdges = null;
-        switch (entity.getEntityType()) {
-        case PROCESS:
-            nodeEdges = getEdgesFor((Process) entity);
-            break;
-        case FEED:
-            nodeEdges = getEdgesFor((Feed) entity);
-            break;
-        default:
-        }
-        if (nodeEdges == null) {
-            return;
-        }
-
-        for (Map.Entry<Node, Set<Node>> entry : nodeEdges.entrySet()) {
-            if (graph.containsKey(entry.getKey())) {
-                graph.get(entry.getKey()).removeAll(entry.getValue());
-                if (graph.get(entry.getKey()).isEmpty()) {
-                    graph.remove(entry.getKey());
-                }
-            }
-        }
-    }
-
-    @Override
-    public void onChange(Entity oldEntity, Entity newEntity) throws FalconException {
-        onRemove(oldEntity);
-        onAdd(newEntity);
-    }
-
-    @Override
-    public void onReload(Entity entity) throws FalconException {
-        onAdd(entity);
-    }
-
-    private Map<Node, Set<Node>> getEdgesFor(Process process) {
-        Map<Node, Set<Node>> nodeEdges = new HashMap<Node, Set<Node>>();
-        Node processNode = new Node(EntityType.PROCESS, process.getName());
-        nodeEdges.put(processNode, new HashSet<Node>());
-        Set<Node> processEdges = nodeEdges.get(processNode);
-        if (process.getInputs() != null) {
-            for (Input input : process.getInputs().getInputs()) {
-                Node feedNode = new Node(EntityType.FEED, input.getFeed());
-                if (!nodeEdges.containsKey(feedNode)) {
-                    nodeEdges.put(feedNode, new HashSet<Node>());
-                }
-                Set<Node> feedEdges = nodeEdges.get(feedNode);
-                processEdges.add(feedNode);
-                feedEdges.add(processNode);
-            }
-        }
-        if (process.getOutputs() != null) {
-            for (Output output : process.getOutputs().getOutputs()) {
-                Node feedNode = new Node(EntityType.FEED, output.getFeed());
-                if (!nodeEdges.containsKey(feedNode)) {
-                    nodeEdges.put(feedNode, new HashSet<Node>());
-                }
-                Set<Node> feedEdges = nodeEdges.get(feedNode);
-                processEdges.add(feedNode);
-                feedEdges.add(processNode);
-            }
-        }
-
-        for (Cluster cluster : process.getClusters().getClusters()) {
-            Node clusterNode = new Node(EntityType.CLUSTER, cluster.getName());
-            processEdges.add(clusterNode);
-            nodeEdges.put(clusterNode, new HashSet<Node>());
-            nodeEdges.get(clusterNode).add(processNode);
-        }
-
-        return nodeEdges;
-    }
-
-    private Map<Node, Set<Node>> getEdgesFor(Feed feed) {
-        Map<Node, Set<Node>> nodeEdges = new HashMap<Node, Set<Node>>();
-        Node feedNode = new Node(EntityType.FEED, feed.getName());
-        Set<Node> feedEdges = new HashSet<Node>();
-        nodeEdges.put(feedNode, feedEdges);
-
-        for (org.apache.falcon.entity.v0.feed.Cluster cluster : feed.getClusters().getClusters()) {
-            Node clusterNode = new Node(EntityType.CLUSTER, cluster.getName());
-            if (!nodeEdges.containsKey(clusterNode)) {
-                nodeEdges.put(clusterNode, new HashSet<Node>());
-            }
-            Set<Node> clusterEdges = nodeEdges.get(clusterNode);
-            feedEdges.add(clusterNode);
-            clusterEdges.add(feedNode);
-
-            if (FeedHelper.isImportEnabled(cluster)) {
-                Node dbNode = new Node(EntityType.DATASOURCE, FeedHelper.getImportDatasourceName(cluster));
-                if (!nodeEdges.containsKey(dbNode)) {
-                    nodeEdges.put(dbNode, new HashSet<Node>());
-                }
-                Set<Node> dbEdges = nodeEdges.get(dbNode);
-                feedEdges.add(dbNode);
-                dbEdges.add(feedNode);
-            }
-        }
-        return nodeEdges;
-    }
-
-    /**
-     * Node element in the graph.
-     */
-    private static final class Node {
-
-        private final EntityType type;
-        private final String name;
-
-        private Node(EntityType type, String name) {
-            this.type = type;
-            this.name = name;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-
-            Node node = (Node) o;
-
-            boolean namesDiffer = name != null ? !name.equals(node.name) : node.name != null;
-
-            if (namesDiffer) {
-                return false;
-            }
-            if (type != node.type) {
-                return false;
-            }
-
-            return true;
-        }
-
-        @Override
-        public int hashCode() {
-            int result = type != null ? type.hashCode() : 0;
-            result = 31 * result + (name != null ? name.hashCode() : 0);
-            return result;
-        }
-
-        @Override
-        public String toString() {
-            return "(" + type + ") " + name;
-        }
-    }
-}
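
A hypothetical, simplified sketch of the idea the class above implements: an undirected adjacency map keyed by (type, name) nodes, where linking a process to its feeds records the edge in both directions and dependents are read straight off the map. The names below are illustrative only, not Falcon APIs.
<verbatim>
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class MiniEntityGraph {
    private final Map<String, Set<String>> graph = new HashMap<>();

    /** Link a process to one of its feeds in both directions, as EntityGraph.onAdd does. */
    public void link(String processNode, String feedNode) {
        graph.computeIfAbsent(processNode, k -> new HashSet<>()).add(feedNode);
        graph.computeIfAbsent(feedNode, k -> new HashSet<>()).add(processNode);
    }

    /** Entities directly connected to the given entity, as getDependents does. */
    public Set<String> dependents(String node) {
        return graph.getOrDefault(node, new HashSet<>());
    }

    public static void main(String[] args) {
        MiniEntityGraph g = new MiniEntityGraph();
        g.link("(PROCESS) SampleProcess", "(FEED) SampleInput");
        g.link("(PROCESS) SampleProcess", "(FEED) SampleOutput");
        System.out.println(g.dependents("(FEED) SampleInput")); // [(PROCESS) SampleProcess]
    }
}
</verbatim>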

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/v0/EntityIntegrityChecker.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/v0/EntityIntegrityChecker.java b/common/src/main/java/org/apache/falcon/entity/v0/EntityIntegrityChecker.java
deleted file mode 100644
index 4c7e913..0000000
--- a/common/src/main/java/org/apache/falcon/entity/v0/EntityIntegrityChecker.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.Pair;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Helper methods to check integrity of entity.
- */
-public final class EntityIntegrityChecker {
-
-    private EntityIntegrityChecker() {}
-
-    public static Pair<String, EntityType>[] referencedBy(Entity entity) throws FalconException {
-        Set<Entity> deps = EntityGraph.get().getDependents(entity);
-        if (deps == null) {
-            return null;
-        }
-
-        switch (entity.getEntityType()) {
-        case CLUSTER:
-            return filter(deps, EntityType.FEED, EntityType.PROCESS);
-
-        case FEED:
-            return filter(deps, EntityType.PROCESS);
-
-        case DATASOURCE:
-            return filter(deps, EntityType.FEED);
-
-        default:
-            return null;
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    private static Pair<String, EntityType>[] filter(Set<Entity> deps, EntityType... types) {
-        List<Pair<String, EntityType>> filteredSet = new ArrayList<Pair<String, EntityType>>();
-        List<EntityType> validTypes = Arrays.asList(types);
-        for (Entity dep : deps) {
-            if (validTypes.contains(dep.getEntityType())) {
-                filteredSet.add(Pair.of(dep.getName(), dep.getEntityType()));
-            }
-        }
-        return filteredSet.toArray(new Pair[0]);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/v0/UnschedulableEntityException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/v0/UnschedulableEntityException.java b/common/src/main/java/org/apache/falcon/entity/v0/UnschedulableEntityException.java
deleted file mode 100644
index cad196b..0000000
--- a/common/src/main/java/org/apache/falcon/entity/v0/UnschedulableEntityException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.v0;
-
-import org.apache.falcon.FalconException;
-
-/**
- * This exception is thrown when an unschedulable entity,
- * such as CLUSTER, is used with actions like Schedule, Suspend
- * or Resume.
- */
-public class UnschedulableEntityException extends FalconException {
-
-    private static final long serialVersionUID = -1134342662497698943L;
-
-    public UnschedulableEntityException(Exception e) {
-        super(e);
-    }
-
-    public UnschedulableEntityException(String message, Exception e) {
-        super(message, e);
-    }
-
-    public UnschedulableEntityException(String message) {
-        super(message);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/expression/ExpressionHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/expression/ExpressionHelper.java b/common/src/main/java/org/apache/falcon/expression/ExpressionHelper.java
deleted file mode 100644
index 65aaeba..0000000
--- a/common/src/main/java/org/apache/falcon/expression/ExpressionHelper.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.expression;
-
-import org.apache.commons.el.ExpressionEvaluatorImpl;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.jsp.el.ELException;
-import javax.servlet.jsp.el.ExpressionEvaluator;
-import javax.servlet.jsp.el.FunctionMapper;
-import javax.servlet.jsp.el.VariableResolver;
-import java.lang.reflect.Method;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.Properties;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Helper for evaluating expressions.
- */
-public final class ExpressionHelper implements FunctionMapper, VariableResolver {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ExpressionHelper.class);
-    private static final ExpressionHelper INSTANCE = new ExpressionHelper();
-
-    private static final ThreadLocal<Properties> THREAD_VARIABLES = new ThreadLocal<Properties>();
-
-    private static final Pattern SYS_PROPERTY_PATTERN = Pattern.compile("\\$\\{[A-Za-z0-9_.]+\\}");
-
-    private static final ExpressionEvaluator EVALUATOR = new ExpressionEvaluatorImpl();
-    private static final ExpressionHelper RESOLVER = ExpressionHelper.get();
-
-    public static final ThreadLocal<SimpleDateFormat> FORMATTER = new ThreadLocal<SimpleDateFormat>() {
-        @Override
-        protected SimpleDateFormat initialValue() {
-            SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'");
-            format.setTimeZone(TimeZone.getTimeZone("UTC"));
-            return format;
-        }
-    };
-
-    public static ExpressionHelper get() {
-        return INSTANCE;
-    }
-
-    private enum DayOfWeek {
-        SUN, MON, TUE, WED, THU, FRI, SAT
-    }
-
-    private ExpressionHelper() {
-    }
-
-    public <T> T evaluate(String expression, Class<T> clazz) throws FalconException {
-        return evaluateFullExpression("${" + expression + "}", clazz);
-    }
-
-    @SuppressWarnings("unchecked")
-    public <T> T evaluateFullExpression(String expression, Class<T> clazz) throws FalconException {
-        try {
-            return (T) EVALUATOR.evaluate(expression, clazz, RESOLVER, RESOLVER);
-        } catch (ELException e) {
-            throw new FalconException("Unable to evaluate " + expression, e);
-        }
-    }
-
-    @Override
-    public Method resolveFunction(String prefix, String name) {
-        for (Method method : ExpressionHelper.class.getDeclaredMethods()) {
-            if (method.getName().equals(name)) {
-                return method;
-            }
-        }
-        throw new UnsupportedOperationException("Not found " + prefix + ":" + name);
-    }
-
-    public void setPropertiesForVariable(Properties properties) {
-        THREAD_VARIABLES.set(properties);
-    }
-
-    @Override
-    public Object resolveVariable(String field) {
-        return THREAD_VARIABLES.get().get(field);
-    }
-
-    private static ThreadLocal<Date> referenceDate = new ThreadLocal<Date>();
-
-    public static void setReferenceDate(Date date) {
-        referenceDate.set(date);
-        Properties variables = getTimeVariables(date, TimeZone.getTimeZone("UTC"));
-        THREAD_VARIABLES.set(variables);
-    }
-
-    public static Properties getTimeVariables(Date date, TimeZone tz) {
-        Properties vars = new Properties();
-        Calendar cal = Calendar.getInstance(tz);
-        cal.setTime(date);
-        vars.put(FeedDataPath.VARS.YEAR.name(), String.format("%04d", cal.get(Calendar.YEAR)));
-        vars.put(FeedDataPath.VARS.MONTH.name(), String.format("%02d", (cal.get(Calendar.MONTH) + 1)));
-        vars.put(FeedDataPath.VARS.DAY.name(), String.format("%02d", cal.get(Calendar.DAY_OF_MONTH)));
-        vars.put(FeedDataPath.VARS.HOUR.name(), String.format("%02d", cal.get(Calendar.HOUR_OF_DAY)));
-        vars.put(FeedDataPath.VARS.MINUTE.name(), String.format("%02d", cal.get(Calendar.MINUTE)));
-        return vars;
-    }
-
-    private static int getDayOffset(String weekDayName) {
-        int day;
-        Calendar nominalTime = Calendar.getInstance();
-        nominalTime.setTimeZone(TimeZone.getTimeZone("UTC"));
-        nominalTime.setTime(referenceDate.get());
-        int currentWeekDay = nominalTime.get(Calendar.DAY_OF_WEEK);
-        int weekDay = DayOfWeek.valueOf(weekDayName).ordinal() + 1; //to map to Calendar.SUNDAY ...
-        day = weekDay - currentWeekDay;
-        if (weekDay > currentWeekDay) {
-            day = day - 7;
-        }
-        return day;
-    }
-
-    @edu.umd.cs.findbugs.annotations.SuppressWarnings({"SF_SWITCH_FALLTHROUGH"})
-    private static Date getRelative(Date date, int boundary, int month, int day, int hour, int minute) {
-        Calendar dsInstanceCal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
-        dsInstanceCal.setTime(date);
-        switch (boundary) {
-        case Calendar.YEAR:
-            dsInstanceCal.set(Calendar.MONTH, 0);
-        case Calendar.MONTH:
-            dsInstanceCal.set(Calendar.DAY_OF_MONTH, 1);
-        case Calendar.DAY_OF_MONTH:
-            dsInstanceCal.set(Calendar.HOUR_OF_DAY, 0);
-        case Calendar.HOUR:
-            dsInstanceCal.set(Calendar.MINUTE, 0);
-            dsInstanceCal.set(Calendar.SECOND, 0);
-            dsInstanceCal.set(Calendar.MILLISECOND, 0);
-            break;
-        case Calendar.SECOND:
-            break;
-        default:
-            throw new IllegalArgumentException("Invalid boundary " + boundary);
-        }
-
-        dsInstanceCal.add(Calendar.YEAR, 0);
-        dsInstanceCal.add(Calendar.MONTH, month);
-        dsInstanceCal.add(Calendar.DAY_OF_MONTH, day);
-        dsInstanceCal.add(Calendar.HOUR_OF_DAY, hour);
-        dsInstanceCal.add(Calendar.MINUTE, minute);
-        return dsInstanceCal.getTime();
-    }
-
-    public static Date now(int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.SECOND, 0, 0, hour, minute);
-    }
-
-    public static Date today(int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.DAY_OF_MONTH, 0, 0, hour, minute);
-    }
-
-    public static Date yesterday(int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.DAY_OF_MONTH, 0, -1, hour, minute);
-    }
-
-    public static Date currentMonth(int day, int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.MONTH, 0, day, hour, minute);
-    }
-
-    public static Date lastMonth(int day, int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.MONTH, -1, day, hour, minute);
-    }
-
-    public static Date currentWeek(String weekDay, int hour, int minute) {
-        int day = getDayOffset(weekDay);
-        return getRelative(referenceDate.get(), Calendar.DAY_OF_MONTH, 0, day, hour, minute);
-    }
-
-    public static Date lastWeek(String weekDay, int hour, int minute) {
-        int day = getDayOffset(weekDay);
-        return getRelative(referenceDate.get(), Calendar.DAY_OF_MONTH, 0, day - 7, hour, minute);
-    }
-
-    public static Date currentYear(int month, int day, int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.YEAR, month, day, hour, minute);
-    }
-
-    public static Date lastYear(int month, int day, int hour, int minute) {
-        return getRelative(referenceDate.get(), Calendar.YEAR, month - 12, day, hour, minute);
-    }
-
-    public static Date latest(int n) {
-        //by pass Falcon validations
-        return referenceDate.get();
-    }
-
-    public static Date future(int n, int limit) {
-        //by pass Falcon validations
-        return referenceDate.get();
-    }
-
-    public static long hours(int val) {
-        return TimeUnit.HOURS.toMillis(val);
-    }
-
-    public static long minutes(int val) {
-        return TimeUnit.MINUTES.toMillis(val);
-    }
-
-    public static long days(int val) {
-        return TimeUnit.DAYS.toMillis(val);
-    }
-
-    public static long months(int val) {
-        return val * days(31);
-    }
-
-    public static long years(int val) {
-        return val * days(366);
-    }
-
-    public static String substitute(String originalValue) {
-        return substitute(originalValue, System.getProperties());
-    }
-
-    public static String substitute(String originalValue, Properties properties) {
-        Matcher envVarMatcher = SYS_PROPERTY_PATTERN.matcher(originalValue);
-        while (envVarMatcher.find()) {
-            String envVar = originalValue.substring(envVarMatcher.start() + 2,
-                    envVarMatcher.end() - 1);
-            String envVal = properties.getProperty(envVar, System.getenv(envVar));
-
-            envVar = "\\$\\{" + envVar + "\\}";
-            if (envVal != null) {
-                originalValue = originalValue.replaceAll(envVar, Matcher.quoteReplacement(envVal));
-                envVarMatcher = SYS_PROPERTY_PATTERN.matcher(originalValue);
-            }
-        }
-        return originalValue;
-    }
-
-}
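
A short sketch of how the helper above is typically driven: pin a reference date, then evaluate the same EL functions that appear in process input ranges (for example start="yesterday(0,0)"). It assumes the commons-el dependency used by the class is on the classpath; the chosen reference date is arbitrary.
<verbatim>
import org.apache.falcon.expression.ExpressionHelper;
import java.util.Date;

public class ExpressionHelperDemo {
    public static void main(String[] args) throws Exception {
        // All relative functions (today, yesterday, lastWeek, ...) resolve against this instant.
        ExpressionHelper.setReferenceDate(new Date());
        ExpressionHelper evaluator = ExpressionHelper.get();
        Date start = evaluator.evaluate("yesterday(0,0)", Date.class);
        Date end = evaluator.evaluate("today(-1,0)", Date.class);
        System.out.println("input window: " + start + " .. " + end);
    }
}
</verbatim>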

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/group/FeedGroup.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/group/FeedGroup.java b/common/src/main/java/org/apache/falcon/group/FeedGroup.java
deleted file mode 100644
index d288925..0000000
--- a/common/src/main/java/org/apache/falcon/group/FeedGroup.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.group;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.LocationType;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.regex.Matcher;
-
-/**
- * Represents a logical group of feeds; feeds with the same frequency and
- * date pattern can belong to the same group.
- */
-public class FeedGroup {
-
-    public FeedGroup(String group, Frequency frequency, String path) {
-        this.name = group;
-        this.frequency = frequency;
-        this.datePattern = getDatePattern(path);
-        this.feeds = Collections
-                .newSetFromMap(new ConcurrentHashMap<String, Boolean>());
-    }
-
-    public static String getDatePattern(String path) {
-        Matcher matcher = FeedDataPath.PATTERN.matcher(path);
-        List<String> fields = new ArrayList<String>();
-        while (matcher.find()) {
-            String var = path.substring(matcher.start(), matcher.end());
-            fields.add(var);
-        }
-        Collections.sort(fields);
-        return fields.toString();
-    }
-
-    private String name;
-    private Frequency frequency;
-    private String datePattern;
-    private Set<String> feeds;
-
-    public Set<String> getFeeds() {
-        return feeds;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (obj == null || !(obj instanceof FeedGroup)) {
-            return false;
-        }
-        FeedGroup group = (FeedGroup) obj;
-        return (this.name.equals(group.getName())
-                && this.frequency.equals(group.frequency)
-                && this.datePattern
-                .equals(group.datePattern));
-
-    }
-
-    @Override
-    public int hashCode() {
-        return 127 * name.hashCode() + 31 * frequency.hashCode() + datePattern.hashCode();
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public Frequency getFrequency() {
-        return frequency;
-    }
-
-    public String getDatePattern() {
-        return datePattern;
-    }
-
-    public boolean canContainFeed(org.apache.falcon.entity.v0.feed.Feed feed) throws FalconException {
-        return this.frequency.equals(feed.getFrequency())
-                && this.datePattern.equals(getDatePattern(
-                    FeedHelper.createStorage(feed).getUriTemplate(LocationType.DATA)));
-    }
-}
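
A small sketch of the grouping rule encoded above: two feed paths that expand the same set of date variables reduce to the same date pattern regardless of variable order, so feeds on them can share a group (frequency permitting). The paths are made-up examples, and the printed result is what the code above should produce under that assumption.
<verbatim>
import org.apache.falcon.group.FeedGroup;

public class FeedGroupDemo {
    public static void main(String[] args) {
        String a = FeedGroup.getDatePattern("/projects/a/${YEAR}-${MONTH}-${DAY}/part");
        String b = FeedGroup.getDatePattern("/projects/b/${DAY}/${MONTH}/${YEAR}");
        // Both reduce to the same sorted variable list, so the date patterns should match.
        System.out.println(a.equals(b));
    }
}
</verbatim>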

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/group/FeedGroupMap.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/group/FeedGroupMap.java b/common/src/main/java/org/apache/falcon/group/FeedGroupMap.java
deleted file mode 100644
index a832366..0000000
--- a/common/src/main/java/org/apache/falcon/group/FeedGroupMap.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.group;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.service.ConfigurationChangeListener;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Maintains two-way mappings between feeds and groups (feed to group and group to feed).
- */
-public final class FeedGroupMap implements ConfigurationChangeListener {
-
-    private static final FeedGroupMap INSTANCE = new FeedGroupMap();
-    private Map<String, FeedGroup> groupsMapping = new ConcurrentHashMap<String, FeedGroup>();
-
-    private FeedGroupMap() {
-        // singleton
-    }
-
-    public static FeedGroupMap get() {
-        return INSTANCE;
-    }
-
-    public Map<String, FeedGroup> getGroupsMapping() {
-        return Collections.unmodifiableMap(groupsMapping);
-    }
-
-    @Override
-    public void onAdd(Entity entity) throws FalconException {
-
-        if (entity.getEntityType().equals(EntityType.FEED)) {
-            Feed feed = (Feed) entity;
-            if (feed.getGroups() == null || feed.getGroups().equals("")) {
-                return;
-            }
-            Set<FeedGroup> groupSet = getGroups(feed);
-            addGroups(feed.getName(), groupSet);
-        }
-    }
-
-    @Override
-    public void onRemove(Entity entity) throws FalconException {
-        if (entity.getEntityType().equals(EntityType.FEED)) {
-            Feed feed = (Feed) entity;
-            if (StringUtils.isEmpty(feed.getGroups())) {
-                return;
-            }
-            String[] groups = feed.getGroups().split(",");
-            for (String group : groups) {
-                groupsMapping.get(group).getFeeds().remove(entity.getName());
-                if (groupsMapping.get(group).getFeeds().size() == 0) {
-                    groupsMapping.remove(group);
-                }
-            }
-
-        }
-    }
-
-    @Override
-    public void onChange(Entity oldEntity, Entity newEntity)
-        throws FalconException {
-
-        onRemove(oldEntity);
-        onAdd(newEntity);
-    }
-
-    @Override
-    public void onReload(Entity entity) throws FalconException {
-        onAdd(entity);
-    }
-
-    private void addGroups(String feed, Set<FeedGroup> groups) {
-        for (FeedGroup group : groups) {
-            if (groupsMapping.containsKey(group.getName())) {
-                groupsMapping.get(group.getName()).getFeeds().add(feed);
-            } else {
-                group.getFeeds().add(feed);
-                groupsMapping.put(group.getName(), group);
-            }
-        }
-    }
-
-    public Set<FeedGroup> getGroups(String groups, Frequency frequency, String path) {
-        Set<FeedGroup> groupSet = new HashSet<FeedGroup>();
-        String[] groupArray = groups.split(",");
-        for (String group : groupArray) {
-            groupSet.add(new FeedGroup(group, frequency, path));
-        }
-        return groupSet;
-    }
-
-    public Set<FeedGroup> getGroups(org.apache.falcon.entity.v0.feed.Feed feed) throws FalconException {
-        return getGroups(feed.getGroups(), feed.getFrequency(),
-                FeedHelper.createStorage(feed).getUriTemplate(LocationType.DATA));
-    }
-}

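To see the mapping end to end: a feed declaring groups="analytics,dashboard" yields one FeedGroup per name, each carrying the feed's frequency and the date pattern derived from its data path. A small usage sketch against the API shown above (the group names, frequency and path are illustrative only):

    import java.util.Set;

    import org.apache.falcon.entity.v0.Frequency;
    import org.apache.falcon.group.FeedGroup;
    import org.apache.falcon.group.FeedGroupMap;

    public class FeedGroupMapSketch {
        public static void main(String[] args) {
            // One FeedGroup per comma-separated name; the frequency and the date
            // pattern derived from the path are shared by all of them.
            Set<FeedGroup> groups = FeedGroupMap.get().getGroups(
                    "analytics,dashboard",                     // feed's groups attribute
                    new Frequency("minutes(30)"),              // feed frequency
                    "/data/clicks/${YEAR}/${MONTH}/${DAY}");   // data path template

            for (FeedGroup group : groups) {
                System.out.println(group.getName() + " -> " + group.getDatePattern());
            }
        }
    }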
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java b/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
deleted file mode 100644
index e33d353..0000000
--- a/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hadoop;
-
-import org.apache.commons.lang.Validate;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.security.SecurityUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.PrivilegedExceptionAction;
-
-/**
- * A factory implementation to dole out FileSystem handles based on the logged in user.
- */
-public final class HadoopClientFactory {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HadoopClientFactory.class);
-
-    public static final String FS_DEFAULT_NAME_KEY = CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-    public static final String MR_JT_ADDRESS_KEY = "mapreduce.jobtracker.address";
-    public static final String YARN_RM_ADDRESS_KEY = "yarn.resourcemanager.address";
-
-    public static final FsPermission READ_EXECUTE_PERMISSION =
-            new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
-    public static final FsPermission ALL_PERMISSION =
-            new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
-
-    private static final HadoopClientFactory INSTANCE = new HadoopClientFactory();
-
-    private HadoopClientFactory() {
-    }
-
-    public static HadoopClientFactory get() {
-        return INSTANCE;
-    }
-
-    /**
-     * This method is only used by Falcon internally to talk to the config store on HDFS.
-     *
-     * @param uri file system URI for config store.
-     * @return FileSystem created as the Falcon login (service) user.
-     * @throws org.apache.falcon.FalconException
-     *          if the filesystem could not be created.
-     */
-    public FileSystem createFalconFileSystem(final URI uri) throws FalconException {
-        Validate.notNull(uri, "uri cannot be null");
-
-        try {
-            Configuration conf = new Configuration();
-            if (UserGroupInformation.isSecurityEnabled()) {
-                conf.set(SecurityUtil.NN_PRINCIPAL, StartupProperties.get().getProperty(SecurityUtil.NN_PRINCIPAL));
-            }
-
-            return createFileSystem(UserGroupInformation.getLoginUser(), uri, conf);
-        } catch (IOException e) {
-            throw new FalconException("Exception while getting FileSystem for: " + uri, e);
-        }
-    }
-
-    /**
-     * This method is only used by Falcon internally to talk to the config store on HDFS.
-     *
-     * @param conf configuration.
-     * @return FileSystem created as the Falcon login (service) user.
-     * @throws org.apache.falcon.FalconException
-     *          if the filesystem could not be created.
-     */
-    public FileSystem createFalconFileSystem(final Configuration conf)
-        throws FalconException {
-        Validate.notNull(conf, "configuration cannot be null");
-
-        String nameNode = getNameNode(conf);
-        try {
-            return createFileSystem(UserGroupInformation.getLoginUser(), new URI(nameNode), conf);
-        } catch (URISyntaxException e) {
-            throw new FalconException("Exception while getting FileSystem for: " + nameNode, e);
-        } catch (IOException e) {
-            throw new FalconException("Exception while getting FileSystem for: " + nameNode, e);
-        }
-    }
-
-    /**
-     * Return a FileSystem created with the authenticated proxy user for the specified conf.
-     *
-     * @param conf Configuration with all necessary information to create the FileSystem.
-     * @return FileSystem created with the provided proxyUser/group.
-     * @throws org.apache.falcon.FalconException
-     *          if the filesystem could not be created.
-     */
-    public FileSystem createProxiedFileSystem(final Configuration conf)
-        throws FalconException {
-        Validate.notNull(conf, "configuration cannot be null");
-
-        String nameNode = getNameNode(conf);
-        try {
-            return createProxiedFileSystem(new URI(nameNode), conf);
-        } catch (URISyntaxException e) {
-            throw new FalconException("Exception while getting FileSystem for: " + nameNode, e);
-        }
-    }
-
-    private static String getNameNode(Configuration conf) {
-        return conf.get(FS_DEFAULT_NAME_KEY);
-    }
-
-    /**
-     * This method is called from within a workflow execution context.
-     *
-     * @param uri file system URI
-     * @return file system handle
-     * @throws FalconException
-     */
-    public FileSystem createProxiedFileSystem(final URI uri) throws FalconException {
-        return createProxiedFileSystem(uri, new Configuration());
-    }
-
-    public FileSystem createProxiedFileSystem(final URI uri,
-                                              final Configuration conf) throws FalconException {
-        Validate.notNull(uri, "uri cannot be null");
-
-        try {
-            return createFileSystem(CurrentUser.getProxyUGI(), uri, conf);
-        } catch (IOException e) {
-            throw new FalconException("Exception while getting FileSystem for proxy: "
-                + CurrentUser.getUser(), e);
-        }
-    }
-
-    /**
-     * Return a FileSystem created with the provided user for the specified URI.
-     *
-     * @param ugi user group information
-     * @param uri  file system URI.
-     * @param conf Configuration with all necessary information to create the FileSystem.
-     * @return FileSystem created with the provided user/group.
-     * @throws org.apache.falcon.FalconException
-     *          if the filesystem could not be created.
-     */
-    @SuppressWarnings("ResultOfMethodCallIgnored")
-    public FileSystem createFileSystem(UserGroupInformation ugi, final URI uri,
-                                       final Configuration conf) throws FalconException {
-        Validate.notNull(ugi, "ugi cannot be null");
-        Validate.notNull(conf, "configuration cannot be null");
-
-        try {
-            if (UserGroupInformation.isSecurityEnabled()) {
-                ugi.checkTGTAndReloginFromKeytab();
-            }
-        } catch (IOException ioe) {
-            throw new FalconException("Exception while getting FileSystem. Unable to check TGT for user "
-                    + ugi.getShortUserName(), ioe);
-        }
-
-        validateNameNode(uri, conf);
-
-        try {
-            // prevent falcon impersonating falcon, no need to use doas
-            final String proxyUserName = ugi.getShortUserName();
-            if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
-                LOG.info("Creating FS for the login user {}, impersonation not required",
-                    proxyUserName);
-                return FileSystem.get(uri, conf);
-            }
-
-            LOG.info("Creating FS impersonating user {}", proxyUserName);
-            return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
-                public FileSystem run() throws Exception {
-                    return FileSystem.get(uri, conf);
-                }
-            });
-        } catch (InterruptedException ex) {
-            throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
-        } catch (IOException ex) {
-            throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
-        }
-    }
-
-    /**
-     * This method validates that the MR endpoint given by the execute url is reachable.
-     *
-     * @param executeUrl JobTracker or ResourceManager URL
-     * @throws IOException
-     */
-    public void validateJobClient(String executeUrl) throws IOException {
-        final JobConf jobConf = new JobConf();
-        jobConf.set(MR_JT_ADDRESS_KEY, executeUrl);
-        jobConf.set(YARN_RM_ADDRESS_KEY, executeUrl);
-
-        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
-        try {
-            JobClient jobClient = loginUser.doAs(new PrivilegedExceptionAction<JobClient>() {
-                public JobClient run() throws Exception {
-                    return new JobClient(jobConf);
-                }
-            });
-
-            jobClient.getClusterStatus().getMapTasks();
-        } catch (InterruptedException e) {
-            throw new IOException("Exception creating job client:" + e.getMessage(), e);
-        }
-    }
-
-    public static FsPermission getDirDefaultPermission(Configuration conf) {
-        return getDirDefault().applyUMask(FsPermission.getUMask(conf));
-    }
-
-    public static FsPermission getFileDefaultPermission(Configuration conf) {
-        return getFileDefault().applyUMask(FsPermission.getUMask(conf));
-    }
-
-    public static FsPermission getDirDefault() {
-        return new FsPermission((short)511);
-    }
-
-    public static FsPermission getFileDefault() {
-        return new FsPermission((short)438);
-    }
-
-    public static void mkdirsWithDefaultPerms(FileSystem fs, Path path) throws IOException {
-        mkdirs(fs, path, getDirDefaultPermission(fs.getConf()));
-    }
-
-    public static void mkdirs(FileSystem fs, Path path,
-                              FsPermission permission) throws IOException {
-        if (!FileSystem.mkdirs(fs, path, permission)) {
-            throw new IOException("mkdir failed for " + path);
-        }
-    }
-
-    private void validateNameNode(URI uri, Configuration conf) throws FalconException {
-        String nameNode = uri.getAuthority();
-        if (nameNode == null) {
-            nameNode = getNameNode(conf);
-            if (nameNode != null) {
-                try {
-                    new URI(nameNode).getAuthority();
-                } catch (URISyntaxException ex) {
-                    throw new FalconException("Exception while getting FileSystem", ex);
-                }
-            }
-        }
-    }
-}

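Typical server-side usage of the factory: internal config-store access goes through createFalconFileSystem (runs as the Falcon login user), while work done on behalf of a request goes through createProxiedFileSystem (runs as the authenticated proxy user). A minimal sketch, with a made-up namenode address, assuming CurrentUser has already been authenticated for the calling thread:

    import java.io.IOException;

    import org.apache.falcon.FalconException;
    import org.apache.falcon.hadoop.HadoopClientFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HadoopClientFactorySketch {
        public static void main(String[] args) throws FalconException, IOException {
            Configuration conf = new Configuration();
            // Hypothetical namenode; in a real deployment this comes from the cluster entity.
            conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, "hdfs://namenode.example.com:8020");

            // Impersonates the request's proxy user unless it is the login user itself.
            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(conf);

            // Creates the directory with the default 777-minus-umask directory permission.
            HadoopClientFactory.mkdirsWithDefaultPerms(fs, new Path("/tmp/falcon-sketch"));
        }
    }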
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/lifecycle/AbstractPolicyBuilderFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/lifecycle/AbstractPolicyBuilderFactory.java b/common/src/main/java/org/apache/falcon/lifecycle/AbstractPolicyBuilderFactory.java
deleted file mode 100644
index 5bcc2f8..0000000
--- a/common/src/main/java/org/apache/falcon/lifecycle/AbstractPolicyBuilderFactory.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.lifecycle;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Abstract factory class for feed lifecycle policy builders.
- */
-public abstract class AbstractPolicyBuilderFactory {
-
-    public abstract PolicyBuilder getPolicyBuilder(String policyName) throws FalconException;
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/lifecycle/FeedLifecycleStage.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/lifecycle/FeedLifecycleStage.java b/common/src/main/java/org/apache/falcon/lifecycle/FeedLifecycleStage.java
deleted file mode 100644
index 833ad04..0000000
--- a/common/src/main/java/org/apache/falcon/lifecycle/FeedLifecycleStage.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.lifecycle;
-
-/**
- * Enum for valid lifecycle stages for the feed.
- */
-public enum FeedLifecycleStage {
-
-    RETENTION("AgeBasedDelete");
-
-    private String defaultPolicyName;
-
-    private FeedLifecycleStage(String defaultPolicyName) {
-        this.defaultPolicyName = defaultPolicyName;
-    }
-
-    public String getDefaultPolicyName() {
-        return defaultPolicyName;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/lifecycle/LifecyclePolicy.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/lifecycle/LifecyclePolicy.java b/common/src/main/java/org/apache/falcon/lifecycle/LifecyclePolicy.java
deleted file mode 100644
index be4e68c..0000000
--- a/common/src/main/java/org/apache/falcon/lifecycle/LifecyclePolicy.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.lifecycle;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.hadoop.fs.Path;
-
-import java.util.Properties;
-
-/**
- * Interface for all policies in feed lifecycle.
- */
-public interface LifecyclePolicy {
-
-    /**
-     * Returns the name of the policy. Name of policy must be unique as it is used as an identifier.
-     * @return name of the policy
-     */
-    String getName();
-
-    /**
-     * Returns the stage to which the policy belongs.
-     * @return stage to which the policy belongs.
-     */
-    FeedLifecycleStage getStage();
-
-    /**
-     * Validates the configurations as per this policy.
-     * @param feed Parent feed for which the policy is configured.
-     * @param clusterName cluster to be used as context for validation.
-     * @throws FalconException
-     */
-    void validate(Feed feed, String clusterName) throws FalconException;
-
-    /**
-     * Builds workflow engine artifacts.
-     * @param cluster cluster to be used as context
-     * @param buildPath base path to be used for storing the artifacts.
-     * @param feed Parent feed.
-     * @return Properties to be passed to the caller e.g. bundle in case of oozie workflow engine.
-     * @throws FalconException
-     */
-    Properties build(Cluster cluster, Path buildPath, Feed feed) throws FalconException;
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/lifecycle/PolicyBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/lifecycle/PolicyBuilder.java b/common/src/main/java/org/apache/falcon/lifecycle/PolicyBuilder.java
deleted file mode 100644
index 5e5055b..0000000
--- a/common/src/main/java/org/apache/falcon/lifecycle/PolicyBuilder.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.lifecycle;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.hadoop.fs.Path;
-
-import java.util.Properties;
-
-/**
- * Interface to be implemented by all policy builders for a lifecycle policy.
- * A Builder builds workflow engine specific artifacts for a policy.
- */
-public interface PolicyBuilder {
-
-    Properties build(Cluster cluster, Path buildPath, Feed feed) throws FalconException;
-
-    String getPolicyName();
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/lifecycle/retention/AgeBasedDelete.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/lifecycle/retention/AgeBasedDelete.java b/common/src/main/java/org/apache/falcon/lifecycle/retention/AgeBasedDelete.java
deleted file mode 100644
index 8d735f9..0000000
--- a/common/src/main/java/org/apache/falcon/lifecycle/retention/AgeBasedDelete.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.lifecycle.retention;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.parser.ValidationException;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.feed.RetentionStage;
-import org.apache.falcon.entity.v0.feed.Sla;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.util.StartupProperties;
-
-import java.util.Date;
-
-/**
- * Retention policy that deletes all feed instances whose instance time is older than a given limit.
- * It creates the workflow and coordinators for this policy.
- */
-public class AgeBasedDelete extends RetentionPolicy {
-
-    public static final String LIMIT_PROPERTY_NAME = "retention.policy.agebaseddelete.limit";
-
-    @Override
-    public void validate(Feed feed, String clusterName) throws FalconException {
-        // validate that it is a valid cluster
-        Cluster cluster = FeedHelper.getCluster(feed, clusterName);
-        Frequency retentionLimit = getRetentionLimit(feed, clusterName);
-        if (cluster != null) {
-            validateLimitWithSla(feed, cluster, retentionLimit.toString());
-            validateLimitWithLateData(feed, cluster, retentionLimit.toString());
-            String lifecycleEngine = StartupProperties.get().getProperty("lifecycle.engine.impl",
-                    "org.apache.falcon.lifecycle.engine.oozie.OoziePolicyBuilderFactory");
-            if ("org.apache.falcon.lifecycle.engine.oozie.OoziePolicyBuilderFactory".equals(lifecycleEngine)) {
-                validateRetentionFrequencyForOozie(feed, clusterName);
-            }
-        }
-    }
-
-
-    private void validateRetentionFrequencyForOozie(Feed feed, String clusterName) throws FalconException {
-        // retention shouldn't be more frequent than hours(1) for Oozie Builders.
-        Frequency retentionFrequency = FeedHelper.getLifecycleRetentionFrequency(feed, clusterName);
-        if (retentionFrequency.getTimeUnit() == Frequency.TimeUnit.minutes
-                && retentionFrequency.getFrequencyAsInt() < 60) {
-            throw new ValidationException("Feed Retention can not be more frequent than hours(1)");
-        }
-    }
-
-    private void validateLimitWithLateData(Feed feed, Cluster cluster, String retention) throws FalconException {
-        ExpressionHelper evaluator = ExpressionHelper.get();
-        long retentionPeriod = evaluator.evaluate(retention, Long.class);
-
-        if (feed.getLateArrival() != null) {
-            String feedCutoff = feed.getLateArrival().getCutOff().toString();
-            long feedCutOffPeriod = evaluator.evaluate(feedCutoff, Long.class);
-            if (retentionPeriod < feedCutOffPeriod) {
-                throw new ValidationException(
-                        "Feed's retention limit: " + retention + " of referenced cluster " + cluster.getName()
-                                + " should be more than feed's late arrival cut-off period: " + feedCutoff
-                                + " for feed: " + feed.getName());
-            }
-        }
-    }
-
-    private void validateLimitWithSla(Feed feed, Cluster cluster, String retentionExpression) throws FalconException {
-        // test that slaHigh is less than retention
-        Sla clusterSla = FeedHelper.getSLA(cluster, feed);
-        if (clusterSla != null) {
-            ExpressionHelper evaluator = ExpressionHelper.get();
-            ExpressionHelper.setReferenceDate(new Date());
-
-            Frequency slaHighExpression = clusterSla.getSlaHigh();
-            Date slaHigh = new Date(evaluator.evaluate(slaHighExpression.toString(), Long.class));
-
-            Date retention = new Date(evaluator.evaluate(retentionExpression, Long.class));
-            if (slaHigh.after(retention)) {
-                throw new ValidationException("slaHigh of Feed: " + slaHighExpression
-                        + " is greater than retention of the feed: " + retentionExpression
-                        + " for cluster: " + cluster.getName()
-                );
-            }
-        }
-    }
-
-    public Frequency getRetentionLimit(Feed feed, String clusterName) throws FalconException {
-        RetentionStage retention = FeedHelper.getRetentionStage(feed, clusterName);
-        if (retention != null) {
-            String limit = null;
-            for (Property property : retention.getProperties().getProperties()) {
-                if (StringUtils.equals(property.getName(), LIMIT_PROPERTY_NAME)) {
-                    limit = property.getValue();
-                }
-            }
-            if (limit == null) {
-                throw new FalconException("Property: " + LIMIT_PROPERTY_NAME + " is required for "
-                        + getName() + " policy.");
-            }
-            try {
-                return new Frequency(limit);
-            } catch (IllegalArgumentException e) {
-                throw new FalconException("Invalid value for property: " + LIMIT_PROPERTY_NAME + ", should be a valid "
-                        + "frequency e.g. hours(2)", e);
-            }
-        } else {
-            throw new FalconException("Cluster " + clusterName + " doesn't contain retention stage");
-        }
-    }
-
-}

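The limit property above must be a Falcon Frequency expression; anything else is rejected, and when the Oozie lifecycle engine is used the retention stage may not run more often than hourly. A small sketch of how the limit value parses (the property values are illustrative):

    import org.apache.falcon.entity.v0.Frequency;

    public class RetentionLimitSketch {
        public static void main(String[] args) {
            // Valid limits look like hours(6), days(7), months(1), ...
            Frequency limit = new Frequency("days(7)");
            System.out.println(limit.getTimeUnit() + " / " + limit.getFrequencyAsInt());

            try {
                new Frequency("7 days"); // not a frequency expression
            } catch (IllegalArgumentException e) {
                // AgeBasedDelete wraps this into a FalconException suggesting
                // a valid expression such as hours(2).
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }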
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/lifecycle/retention/RetentionPolicy.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/lifecycle/retention/RetentionPolicy.java b/common/src/main/java/org/apache/falcon/lifecycle/retention/RetentionPolicy.java
deleted file mode 100644
index 7fd6175..0000000
--- a/common/src/main/java/org/apache/falcon/lifecycle/retention/RetentionPolicy.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.lifecycle.retention;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.lifecycle.AbstractPolicyBuilderFactory;
-import org.apache.falcon.lifecycle.FeedLifecycleStage;
-import org.apache.falcon.lifecycle.LifecyclePolicy;
-import org.apache.falcon.lifecycle.PolicyBuilder;
-import org.apache.falcon.workflow.WorkflowEngineFactory;
-import org.apache.hadoop.fs.Path;
-
-import java.util.Properties;
-
-/**
- * Base class that all retention policies must extend.
- */
-public abstract class RetentionPolicy implements LifecyclePolicy {
-
-    @Override
-    public String getName() {
-        return this.getClass().getSimpleName();
-    }
-
-    @Override
-    public FeedLifecycleStage getStage() {
-        return FeedLifecycleStage.RETENTION;
-    }
-
-    @Override
-    public Properties build(Cluster cluster, Path buildPath, Feed feed) throws FalconException {
-        AbstractPolicyBuilderFactory factory = WorkflowEngineFactory.getLifecycleEngine();
-        PolicyBuilder builder = factory.getPolicyBuilder(getName());
-        return builder.build(cluster, buildPath, feed);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/EntityRelationshipGraphBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/EntityRelationshipGraphBuilder.java b/common/src/main/java/org/apache/falcon/metadata/EntityRelationshipGraphBuilder.java
deleted file mode 100644
index 25bbf0c..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/EntityRelationshipGraphBuilder.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.Vertex;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.ProcessHelper;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.entity.v0.process.Workflow;
-import org.apache.falcon.workflow.WorkflowExecutionArgs;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Entity Metadata relationship mapping helper.
- */
-public class EntityRelationshipGraphBuilder extends RelationshipGraphBuilder {
-
-    private static final Logger LOG = LoggerFactory.getLogger(EntityRelationshipGraphBuilder.class);
-
-
-    public EntityRelationshipGraphBuilder(Graph graph, boolean preserveHistory) {
-        super(graph, preserveHistory);
-    }
-
-    public void addEntity(Entity entity) {
-        EntityType entityType = entity.getEntityType();
-        switch (entityType) {
-        case CLUSTER:
-            addClusterEntity((Cluster) entity);
-            break;
-        case PROCESS:
-            addProcessEntity((Process) entity);
-            break;
-        case FEED:
-            addFeedEntity((Feed) entity);
-            break;
-        case DATASOURCE:
-            addDatasourceEntity((Datasource) entity);
-            break;
-
-        default:
-            throw new IllegalArgumentException("Invalid EntityType " + entityType);
-        }
-    }
-
-    public void addClusterEntity(Cluster clusterEntity) {
-        LOG.info("Adding cluster entity: {}", clusterEntity.getName());
-        Vertex clusterVertex = addVertex(clusterEntity.getName(), RelationshipType.CLUSTER_ENTITY);
-
-        addUserRelation(clusterVertex);
-        addColoRelation(clusterEntity.getColo(), clusterVertex);
-        addDataClassification(clusterEntity.getTags(), clusterVertex);
-    }
-
-    public void addFeedEntity(Feed feed) {
-        LOG.info("Adding feed entity: {}", feed.getName());
-        Vertex feedVertex = addVertex(feed.getName(), RelationshipType.FEED_ENTITY);
-
-        addUserRelation(feedVertex);
-        addDataClassification(feed.getTags(), feedVertex);
-        addGroups(feed.getGroups(), feedVertex);
-
-        for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : feed.getClusters().getClusters()) {
-            if (ClusterType.TARGET != feedCluster.getType()) {
-                addRelationToCluster(feedVertex, feedCluster.getName(), RelationshipLabel.FEED_CLUSTER_EDGE);
-            }
-        }
-
-        for (org.apache.falcon.entity.v0.feed.Cluster feedCluster : feed.getClusters().getClusters()) {
-            if (FeedHelper.isImportEnabled(feedCluster)) {
-                addRelationToDatasource(feedVertex, FeedHelper.getImportDatasourceName(feedCluster),
-                        RelationshipLabel.DATASOURCE_IMPORT_EDGE);
-            }
-        }
-    }
-
-    public void addDatasourceEntity(Datasource dsEntity) {
-        LOG.info("Adding datasource entity: {}", dsEntity.getName());
-        Vertex dsVertex = addVertex(dsEntity.getName(), RelationshipType.DATASOURCE_ENTITY);
-
-        addUserRelation(dsVertex);
-        addColoRelation(dsEntity.getColo(), dsVertex);
-        addDataClassification(dsEntity.getTags(), dsVertex);
-    }
-
-
-    public void updateEntity(Entity oldEntity, Entity newEntity) {
-        EntityType entityType = oldEntity.getEntityType();
-        switch (entityType) {
-        case CLUSTER:
-            // a cluster cannot be updated
-            break;
-        case PROCESS:
-            updateProcessEntity((Process) oldEntity, (Process) newEntity);
-            break;
-        case FEED:
-            updateFeedEntity((Feed) oldEntity, (Feed) newEntity);
-            break;
-        default:
-            throw new IllegalArgumentException("Invalid EntityType " + entityType);
-        }
-    }
-
-
-
-    public void updateFeedEntity(Feed oldFeed, Feed newFeed) {
-        LOG.info("Updating feed entity: {}", newFeed.getName());
-        Vertex feedEntityVertex = findVertex(oldFeed.getName(), RelationshipType.FEED_ENTITY);
-        if (feedEntityVertex == null) {
-            LOG.error("Illegal State: Feed entity vertex must exist for {}", oldFeed.getName());
-            throw new IllegalStateException(oldFeed.getName() + " entity vertex must exist.");
-        }
-
-        updateDataClassification(oldFeed.getTags(), newFeed.getTags(), feedEntityVertex);
-        updateGroups(oldFeed.getGroups(), newFeed.getGroups(), feedEntityVertex);
-        updateFeedClusters(oldFeed.getClusters().getClusters(),
-                newFeed.getClusters().getClusters(), feedEntityVertex);
-    }
-
-    public void addProcessEntity(Process process) {
-        String processName = process.getName();
-        LOG.info("Adding process entity: {}", processName);
-        Vertex processVertex = addVertex(processName, RelationshipType.PROCESS_ENTITY);
-        addWorkflowProperties(process.getWorkflow(), processVertex, processName);
-
-        addUserRelation(processVertex);
-        addDataClassification(process.getTags(), processVertex);
-        addPipelines(process.getPipelines(), processVertex);
-
-        for (org.apache.falcon.entity.v0.process.Cluster cluster : process.getClusters().getClusters()) {
-            addRelationToCluster(processVertex, cluster.getName(), RelationshipLabel.PROCESS_CLUSTER_EDGE);
-        }
-
-        addInputFeeds(process.getInputs(), processVertex);
-        addOutputFeeds(process.getOutputs(), processVertex);
-    }
-
-    public void updateProcessEntity(Process oldProcess, Process newProcess) {
-        LOG.info("Updating process entity: {}", newProcess.getName());
-        Vertex processEntityVertex = findVertex(oldProcess.getName(), RelationshipType.PROCESS_ENTITY);
-        if (processEntityVertex == null) {
-            LOG.error("Illegal State: Process entity vertex must exist for {}", oldProcess.getName());
-            throw new IllegalStateException(oldProcess.getName() + " entity vertex must exist");
-        }
-
-        updateWorkflowProperties(oldProcess.getWorkflow(), newProcess.getWorkflow(),
-                processEntityVertex, newProcess.getName());
-        updateDataClassification(oldProcess.getTags(), newProcess.getTags(), processEntityVertex);
-        updatePipelines(oldProcess.getPipelines(), newProcess.getPipelines(), processEntityVertex);
-        updateProcessClusters(oldProcess.getClusters().getClusters(),
-                newProcess.getClusters().getClusters(), processEntityVertex);
-        updateProcessInputs(oldProcess.getInputs(), newProcess.getInputs(), processEntityVertex);
-        updateProcessOutputs(oldProcess.getOutputs(), newProcess.getOutputs(), processEntityVertex);
-    }
-
-    public void addColoRelation(String colo, Vertex fromVertex) {
-        Vertex coloVertex = addVertex(colo, RelationshipType.COLO);
-        addEdge(fromVertex, coloVertex, RelationshipLabel.CLUSTER_COLO.getName());
-    }
-
-    public void addRelationToCluster(Vertex fromVertex, String clusterName, RelationshipLabel edgeLabel) {
-        Vertex clusterVertex = findVertex(clusterName, RelationshipType.CLUSTER_ENTITY);
-        if (clusterVertex == null) { // cluster must exist before adding other entities
-            LOG.error("Illegal State: Cluster entity vertex must exist for {}", clusterName);
-            throw new IllegalStateException("Cluster entity vertex must exist: " + clusterName);
-        }
-
-        addEdge(fromVertex, clusterVertex, edgeLabel.getName());
-    }
-
-    public void addRelationToDatasource(Vertex fromVertex, String datasourceName, RelationshipLabel edgeLabel) {
-        Vertex datasourceVertex = findVertex(datasourceName, RelationshipType.DATASOURCE_ENTITY);
-        if (datasourceVertex == null) { // datasource must exist before adding other entities
-            LOG.error("Illegal State: Datasource entity vertex must exist for {}", datasourceName);
-            throw new IllegalStateException("Datasource entity vertex must exist: " + datasourceName);
-        }
-
-        addEdge(fromVertex, datasourceVertex, edgeLabel.getName());
-    }
-
-    public void addInputFeeds(Inputs inputs, Vertex processVertex) {
-        if (inputs == null) {
-            return;
-        }
-
-        for (Input input : inputs.getInputs()) {
-            addProcessFeedEdge(processVertex, input.getFeed(), RelationshipLabel.FEED_PROCESS_EDGE);
-        }
-    }
-
-    public void addOutputFeeds(Outputs outputs, Vertex processVertex) {
-        if (outputs == null) {
-            return;
-        }
-
-        for (Output output : outputs.getOutputs()) {
-            addProcessFeedEdge(processVertex, output.getFeed(), RelationshipLabel.PROCESS_FEED_EDGE);
-        }
-    }
-
-    public void addProcessFeedEdge(Vertex processVertex, String feedName, RelationshipLabel edgeLabel) {
-        Vertex feedVertex = findVertex(feedName, RelationshipType.FEED_ENTITY);
-        if (feedVertex == null) {
-            LOG.error("Illegal State: Feed entity vertex must exist for {}", feedName);
-            throw new IllegalStateException("Feed entity vertex must exist: " + feedName);
-        }
-
-        addProcessFeedEdge(processVertex, feedVertex, edgeLabel);
-    }
-
-    public void addWorkflowProperties(Workflow workflow, Vertex processVertex, String processName) {
-        processVertex.setProperty(WorkflowExecutionArgs.USER_WORKFLOW_NAME.getName(),
-                ProcessHelper.getProcessWorkflowName(workflow.getName(), processName));
-        processVertex.setProperty(RelationshipProperty.VERSION.getName(), workflow.getVersion());
-        processVertex.setProperty(WorkflowExecutionArgs.USER_WORKFLOW_ENGINE.getName(),
-                workflow.getEngine().value());
-    }
-
-    public void updateWorkflowProperties(Workflow oldWorkflow, Workflow newWorkflow,
-                                         Vertex processEntityVertex, String processName) {
-        if (areSame(oldWorkflow, newWorkflow)) {
-            return;
-        }
-
-        LOG.info("Updating workflow properties for: {}", processEntityVertex);
-        addWorkflowProperties(newWorkflow, processEntityVertex, processName);
-    }
-
-    public void updateDataClassification(String oldClassification, String newClassification,
-                                         Vertex entityVertex) {
-        if (areSame(oldClassification, newClassification)) {
-            return;
-        }
-
-        removeDataClassification(oldClassification, entityVertex);
-        addDataClassification(newClassification, entityVertex);
-    }
-
-    private void removeDataClassification(String classification, Vertex entityVertex) {
-        if (classification == null || classification.length() == 0) {
-            return;
-        }
-
-        String[] oldTags = classification.split(",");
-        for (String oldTag : oldTags) {
-            int index = oldTag.indexOf("=");
-            String tagKey = oldTag.substring(0, index);
-            String tagValue = oldTag.substring(index + 1, oldTag.length());
-
-            removeEdge(entityVertex, tagValue, tagKey);
-        }
-    }
-
-    public void updateGroups(String oldGroups, String newGroups, Vertex entityVertex) {
-        if (areSame(oldGroups, newGroups)) {
-            return;
-        }
-
-        removeGroups(oldGroups, entityVertex);
-        addGroups(newGroups, entityVertex);
-    }
-
-    public void updatePipelines(String oldPipelines, String newPipelines, Vertex entityVertex) {
-        if (areSame(oldPipelines, newPipelines)) {
-            return;
-        }
-
-        removePipelines(oldPipelines, entityVertex);
-        addPipelines(newPipelines, entityVertex);
-    }
-
-    private void removeGroups(String groups, Vertex entityVertex) {
-        removeGroupsOrPipelines(groups, entityVertex, RelationshipLabel.GROUPS);
-    }
-
-    private void removePipelines(String pipelines, Vertex entityVertex) {
-        removeGroupsOrPipelines(pipelines, entityVertex, RelationshipLabel.PIPELINES);
-    }
-
-    private void removeGroupsOrPipelines(String groupsOrPipelines, Vertex entityVertex,
-                                         RelationshipLabel edgeLabel) {
-        if (StringUtils.isEmpty(groupsOrPipelines)) {
-            return;
-        }
-
-        String[] oldGroupOrPipelinesTags = groupsOrPipelines.split(",");
-        for (String groupOrPipelineTag : oldGroupOrPipelinesTags) {
-            removeEdge(entityVertex, groupOrPipelineTag, edgeLabel.getName());
-        }
-    }
-
-    public static boolean areSame(String oldValue, String newValue) {
-        return oldValue == null && newValue == null
-                || oldValue != null && newValue != null && oldValue.equals(newValue);
-    }
-
-    public void updateFeedClusters(List<org.apache.falcon.entity.v0.feed.Cluster> oldClusters,
-                                   List<org.apache.falcon.entity.v0.feed.Cluster> newClusters,
-                                   Vertex feedEntityVertex) {
-        if (areFeedClustersSame(oldClusters, newClusters)) {
-            return;
-        }
-
-        // remove edges to old clusters
-        for (org.apache.falcon.entity.v0.feed.Cluster oldCluster : oldClusters) {
-            if (ClusterType.TARGET != oldCluster.getType()) {
-                removeEdge(feedEntityVertex, oldCluster.getName(),
-                        RelationshipLabel.FEED_CLUSTER_EDGE.getName());
-            }
-        }
-
-        // add edges to new clusters
-        for (org.apache.falcon.entity.v0.feed.Cluster newCluster : newClusters) {
-            if (ClusterType.TARGET != newCluster.getType()) {
-                addRelationToCluster(feedEntityVertex, newCluster.getName(),
-                        RelationshipLabel.FEED_CLUSTER_EDGE);
-            }
-        }
-    }
-
-    public boolean areFeedClustersSame(List<org.apache.falcon.entity.v0.feed.Cluster> oldClusters,
-                                       List<org.apache.falcon.entity.v0.feed.Cluster> newClusters) {
-        if (oldClusters.size() != newClusters.size()) {
-            return false;
-        }
-
-        List<String> oldClusterNames = getFeedClusterNames(oldClusters);
-        List<String> newClusterNames = getFeedClusterNames(newClusters);
-
-        return oldClusterNames.size() == newClusterNames.size()
-                && oldClusterNames.containsAll(newClusterNames)
-                && newClusterNames.containsAll(oldClusterNames);
-    }
-
-    public List<String> getFeedClusterNames(List<org.apache.falcon.entity.v0.feed.Cluster> clusters) {
-        List<String> clusterNames = new ArrayList<String>(clusters.size());
-        for (org.apache.falcon.entity.v0.feed.Cluster cluster : clusters) {
-            clusterNames.add(cluster.getName());
-        }
-
-        return clusterNames;
-    }
-
-    public void updateProcessClusters(List<org.apache.falcon.entity.v0.process.Cluster> oldClusters,
-                                      List<org.apache.falcon.entity.v0.process.Cluster> newClusters,
-                                      Vertex processEntityVertex) {
-        if (areProcessClustersSame(oldClusters, newClusters)) {
-            return;
-        }
-
-        // remove old clusters
-        for (org.apache.falcon.entity.v0.process.Cluster oldCluster : oldClusters) {
-            removeEdge(processEntityVertex, oldCluster.getName(),
-                    RelationshipLabel.PROCESS_CLUSTER_EDGE.getName());
-        }
-
-        // add new clusters
-        for (org.apache.falcon.entity.v0.process.Cluster newCluster : newClusters) {
-            addRelationToCluster(processEntityVertex, newCluster.getName(),
-                    RelationshipLabel.PROCESS_CLUSTER_EDGE);
-        }
-    }
-
-    public boolean areProcessClustersSame(List<org.apache.falcon.entity.v0.process.Cluster> oldClusters,
-                                          List<org.apache.falcon.entity.v0.process.Cluster> newClusters) {
-        if (oldClusters.size() != newClusters.size()) {
-            return false;
-        }
-
-        List<String> oldClusterNames = getProcessClusterNames(oldClusters);
-        List<String> newClusterNames = getProcessClusterNames(newClusters);
-
-        return oldClusterNames.size() == newClusterNames.size()
-                && oldClusterNames.containsAll(newClusterNames)
-                && newClusterNames.containsAll(oldClusterNames);
-    }
-
-    public List<String> getProcessClusterNames(List<org.apache.falcon.entity.v0.process.Cluster> clusters) {
-        List<String> clusterNames = new ArrayList<String>(clusters.size());
-        for (org.apache.falcon.entity.v0.process.Cluster cluster : clusters) {
-            clusterNames.add(cluster.getName());
-        }
-
-        return clusterNames;
-    }
-
-    public static boolean areSame(Workflow oldWorkflow, Workflow newWorkflow) {
-        return areSame(oldWorkflow.getName(), newWorkflow.getName())
-                && areSame(oldWorkflow.getVersion(), newWorkflow.getVersion())
-                && areSame(oldWorkflow.getEngine().value(), newWorkflow.getEngine().value());
-    }
-
-    private void updateProcessInputs(Inputs oldProcessInputs, Inputs newProcessInputs,
-                                     Vertex processEntityVertex) {
-        if (areSame(oldProcessInputs, newProcessInputs)) {
-            return;
-        }
-
-        removeInputFeeds(oldProcessInputs, processEntityVertex);
-        addInputFeeds(newProcessInputs, processEntityVertex);
-    }
-
-    public static boolean areSame(Inputs oldProcessInputs, Inputs newProcessInputs) {
-        if (oldProcessInputs == null && newProcessInputs == null) {
-            return true;
-        }
-
-        if (oldProcessInputs == null || newProcessInputs == null
-                || oldProcessInputs.getInputs().size() != newProcessInputs.getInputs().size()) {
-            return false;
-        }
-
-        List<Input> oldInputs = oldProcessInputs.getInputs();
-        List<Input> newInputs = newProcessInputs.getInputs();
-
-        return oldInputs.size() == newInputs.size()
-                && oldInputs.containsAll(newInputs)
-                && newInputs.containsAll(oldInputs);
-    }
-
-    public void removeInputFeeds(Inputs inputs, Vertex processVertex) {
-        if (inputs == null) {
-            return;
-        }
-
-        for (Input input : inputs.getInputs()) {
-            removeProcessFeedEdge(processVertex, input.getFeed(), RelationshipLabel.FEED_PROCESS_EDGE);
-        }
-    }
-
-    public void removeOutputFeeds(Outputs outputs, Vertex processVertex) {
-        if (outputs == null) {
-            return;
-        }
-
-        for (Output output : outputs.getOutputs()) {
-            removeProcessFeedEdge(processVertex, output.getFeed(), RelationshipLabel.PROCESS_FEED_EDGE);
-        }
-    }
-
-    public void removeProcessFeedEdge(Vertex processVertex, String feedName, RelationshipLabel edgeLabel) {
-        Vertex feedVertex = findVertex(feedName, RelationshipType.FEED_ENTITY);
-        if (feedVertex == null) {
-            LOG.error("Illegal State: Feed entity vertex must exist for {}", feedName);
-            throw new IllegalStateException("Feed entity vertex must exist: " + feedName);
-        }
-
-        if (edgeLabel == RelationshipLabel.FEED_PROCESS_EDGE) {
-            removeEdge(feedVertex, processVertex, edgeLabel.getName());
-        } else {
-            removeEdge(processVertex, feedVertex, edgeLabel.getName());
-        }
-    }
-
-    private void updateProcessOutputs(Outputs oldProcessOutputs, Outputs newProcessOutputs,
-                                      Vertex processEntityVertex) {
-        if (areSame(oldProcessOutputs, newProcessOutputs)) {
-            return;
-        }
-
-        removeOutputFeeds(oldProcessOutputs, processEntityVertex);
-        addOutputFeeds(newProcessOutputs, processEntityVertex);
-    }
-
-    public static boolean areSame(Outputs oldProcessOutputs, Outputs newProcessOutputs) {
-        if (oldProcessOutputs == null && newProcessOutputs == null) {
-            return true;
-        }
-
-        if (oldProcessOutputs == null || newProcessOutputs == null
-                || oldProcessOutputs.getOutputs().size() != newProcessOutputs.getOutputs().size()) {
-            return false;
-        }
-
-        List<Output> oldOutputs = oldProcessOutputs.getOutputs();
-        List<Output> newOutputs = newProcessOutputs.getOutputs();
-
-        return oldOutputs.size() == newOutputs.size()
-                && oldOutputs.containsAll(newOutputs)
-                && newOutputs.containsAll(oldOutputs);
-    }
-}

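Tags, groups and pipelines all arrive as comma-separated strings; tags additionally use a key=value form that the class above splits on the first '='. A hypothetical standalone helper (not part of the original class) that mirrors that parsing:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public final class TagParsingSketch {

        private TagParsingSketch() {
        }

        // e.g. "consumer=consumer@xyz.com,owner=etl-team"
        //   -> {consumer=consumer@xyz.com, owner=etl-team}
        public static Map<String, String> parseTags(String classification) {
            Map<String, String> tags = new LinkedHashMap<String, String>();
            if (classification == null || classification.isEmpty()) {
                return tags;
            }
            for (String tag : classification.split(",")) {
                int index = tag.indexOf('=');
                tags.put(tag.substring(0, index), tag.substring(index + 1));
            }
            return tags;
        }

        public static void main(String[] args) {
            System.out.println(parseTags("consumer=consumer@xyz.com,owner=etl-team"));
        }
    }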
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/metadata/GraphUtils.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/GraphUtils.java b/common/src/main/java/org/apache/falcon/metadata/GraphUtils.java
deleted file mode 100644
index 8bec02f..0000000
--- a/common/src/main/java/org/apache/falcon/metadata/GraphUtils.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.metadata;
-
-import com.tinkerpop.blueprints.Direction;
-import com.tinkerpop.blueprints.Edge;
-import com.tinkerpop.blueprints.Graph;
-import com.tinkerpop.blueprints.Vertex;
-import com.tinkerpop.blueprints.util.io.graphson.GraphSONWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * Utility class for graph operations.
- */
-public final class GraphUtils {
-
-    private static final Logger LOG = LoggerFactory.getLogger(GraphUtils.class);
-
-    private GraphUtils() {
-    }
-
-    public static void dumpToLog(final Graph graph) {
-        LOG.debug("Vertices of {}", graph);
-        for (Vertex vertex : graph.getVertices()) {
-            LOG.debug(vertexString(vertex));
-        }
-
-        LOG.debug("Edges of {}", graph);
-        for (Edge edge : graph.getEdges()) {
-            LOG.debug(edgeString(edge));
-        }
-    }
-
-    public static void dump(final Graph graph) throws IOException {
-        dump(graph, System.out);
-    }
-
-    public static void dump(final Graph graph, OutputStream outputStream) throws IOException {
-        GraphSONWriter.outputGraph(graph, outputStream);
-    }
-
-    public static void dump(final Graph graph, String fileName) throws IOException {
-        GraphSONWriter.outputGraph(graph, fileName);
-    }
-
-    public static String vertexString(final Vertex vertex) {
-        StringBuilder properties = new StringBuilder();
-        for (String propertyKey : vertex.getPropertyKeys()) {
-            properties.append(propertyKey)
-                    .append("=").append(vertex.getProperty(propertyKey))
-                    .append(", ");
-        }
-
-        return "v[" + vertex.getId() + "], Properties[" + properties + "]";
-    }
-
-    public static String edgeString(final Edge edge) {
-        return "e[" + edge.getLabel() + "], ["
-                + edge.getVertex(Direction.OUT).getProperty("name")
-                + " -> " + edge.getLabel() + " -> "
-                + edge.getVertex(Direction.IN).getProperty("name")
-                + "]";
-    }
-}


[23/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/EntitySpecification.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/EntitySpecification.twiki b/docs/src/site/twiki/EntitySpecification.twiki
deleted file mode 100644
index d08c3a3..0000000
--- a/docs/src/site/twiki/EntitySpecification.twiki
+++ /dev/null
@@ -1,996 +0,0 @@
----++ Contents
-   * <a href="#Cluster_Specification">Cluster Specification</a>
-   * <a href="#Feed_Specification">Feed Specification</a>
-   * <a href="#Process_Specification">Process Specification</a>
-   
----++ Cluster Specification
-The cluster XSD specification is available here:
-A cluster contains different interfaces which are used by Falcon like readonly, write, workflow and messaging.
-A cluster is referenced by feeds and processes which are on-boarded to Falcon by its name.
-
-Following are the tags defined in a cluster.xml:
-<verbatim>
-<cluster colo="gs" description="" name="corp" xmlns="uri:falcon:cluster:0.1"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-</verbatim>
-The colo specifies the colo to which this cluster belongs and name is the name of the cluster, which has to
-be unique.
-
-
----+++ Interfaces
-
-A cluster has various interfaces as described below:
-<verbatim>
-    <interface type="readonly" endpoint="hftp://localhost:50010" version="0.20.2" />
-</verbatim>
-A readonly interface specifies the endpoint for Hadoop's HFTP protocol;
-this is used in the context of feed replication.
-
-<verbatim>
-<interface type="write" endpoint="hdfs://localhost:8020" version="0.20.2" />
-</verbatim>
-A write interface specifies the interface to write to hdfs; its endpoint is the value of fs.defaultFS.
-Falcon uses this interface to write system data to hdfs and feeds referencing this cluster are written to hdfs
-using the same write interface.
-
-<verbatim>
-<interface type="execute" endpoint="localhost:8021" version="0.20.2" />
-</verbatim>
-An execute interface specifies the interface for the job tracker; its endpoint is the value of mapreduce.jobtracker.address.
-Falcon uses this interface to submit the processes as jobs on !JobTracker defined here.
-
-<verbatim>
-<interface type="workflow" endpoint="http://localhost:11000/oozie/" version="4.0" />
-</verbatim>
-A workflow interface specifies the interface for the workflow engine; an example of its endpoint is the value of OOZIE_URL.
-Falcon uses this interface to schedule the processes referencing this cluster on workflow engine defined here.
-
-<verbatim>
-<interface type="registry" endpoint="thrift://localhost:9083" version="0.11.0" />
-</verbatim>
-A registry interface specifies the interface for metadata catalog, such as Hive Metastore (or HCatalog).
-Falcon uses this interface to register/de-register partitions for a given database and table. It also
-uses this information to schedule data availability events based on partitions in the workflow engine.
-Although Hive metastore supports both RPC and HTTP, Falcon comes with an implementation for RPC over thrift.
-
-<verbatim>
-<interface type="messaging" endpoint="tcp://localhost:61616?daemon=true" version="5.4.6" />
-</verbatim>
-A messaging interface specifies the interface for sending feed availability messages; its endpoint is the broker url with a tcp address.
-
----+++ Locations
-
-A cluster has a list of locations defined:
-<verbatim>
-<location name="staging" path="/projects/falcon/staging" />
-<location name="working" path="/projects/falcon/working" /> <!--optional-->
-</verbatim>
-A location has a name and a path; the name is the type of the location. Allowed values of name are staging, temp and working.
-Path is the hdfs path for each location.
-Falcon would use these locations to do intermediate processing of entities in hdfs and hence Falcon
-should have read/write/execute permission on these locations.
-These locations MUST be created prior to submitting a cluster entity to Falcon.
-*staging* should have 777 permissions and is a mandatory location. The parent dirs must have execute permissions so multiple
-users can write to this location. *working* must have 755 permissions and is an optional location.
-If *working* is not specified, falcon creates a sub directory in the *staging* location with 755 permissions.
-The parent dir for *working* must have execute permissions so multiple
-users can read from this location.
-
----+++ ACL
-
-A cluster has ACL (Access Control List) useful for implementing permission requirements
-and provide a way to set different permissions for specific users or named groups.
-<verbatim>
-    <ACL owner="test-user" group="test-group" permission="*"/>
-</verbatim>
-ACL indicates the Access control list for this cluster.
-owner is the Owner of this entity.
-group is the one which has access to read.
-permission indicates the permission.
-
----+++ Custom Properties
-
-A cluster has a list of properties:
-key-value pairs which are propagated to the workflow engine.
-<verbatim>
-<property name="brokerImplClass" value="org.apache.activemq.ActiveMQConnectionFactory" />
-</verbatim>
-Ideally the JMS impl class name of the messaging engine (brokerImplClass)
-should be defined here.
-
----++ Datasource Specification
-
-The datasource entity contains connection information required to connect to a data source like MySQL database.
-The datasource XSD specification is available here:
-A datasource contains read and write interfaces which are used by Falcon to import or export data from or to
-datasources respectively. A datasource is referenced by feeds which are on-boarded to Falcon by its name.
-
-Following are the tags defined in a datasource.xml:
-
-<verbatim>
-<datasource colo="west-coast" description="Customer database on west coast" type="mysql"
- name="test-hsql-db" xmlns="uri:falcon:datasource:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-</verbatim>
-
-The colo specifies the colo to which the datasource belongs and name is the name of the datasource, which has to
-be unique.
-
----+++ Interfaces
-
-A datasource has two interfaces as described below:
-<verbatim>
-    <interface type="readonly" endpoint="jdbc:hsqldb:localhost/db"/>
-</verbatim>
-
-A readonly interface specifies the endpoint and protocol to connect to a datasource.
-This would be used in the context of import from datasource into HDFS.
-
-<verbatim>
-<interface type="write" endpoint="jdbc:hsqldb:localhost/db1">
-</verbatim>
-
-A write interface specifies the endpoint and protocol to write to the datasource.
-Falcon uses this interface to export data from hdfs to the datasource.
-
-<verbatim>
-<credential type="password-text">
-    <userName>SA</userName>
-    <passwordText></passwordText>
-</credential>
-</verbatim>
-
-
-A credential is associated with an interface (read or write) providing user name and password to authenticate
-to the datasource.
-
-<verbatim>
-<credential type="password-file">
-     <userName>SA</userName>
-     <passwordFile>hdfs-file-path</passwordFile>
-</credential>
-</verbatim>
-
-The credential can be specified via a password file present in the HDFS. This file should only be accessible by
-the user.
-
----++ Feed Specification
-The Feed XSD specification is available here.
-A Feed defines various attributes of a feed like feed location, frequency, late-arrival handling and retention policies.
-A feed can be scheduled on a cluster; once a feed is scheduled, its retention and replication processes are triggered in a given cluster.
-<verbatim>
-<feed description="clicks log" name="clicks" xmlns="uri:falcon:feed:0.1"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-</verbatim>
-A feed should have a unique name and this name is referenced by processes as input or output feed.
-
----+++ Storage
-Falcon introduces a new abstraction to encapsulate the storage for a given feed which can be expressed either as a
-path on the file system (File System Storage) or as a table in a catalog such as Hive (Catalog Storage).
-
-<verbatim>
-    <xs:choice minOccurs="1" maxOccurs="1">
-        <xs:element type="locations" name="locations"/>
-        <xs:element type="catalog-table" name="table"/>
-    </xs:choice>
-</verbatim>
-
-A feed should contain one of the two storage options: locations on the file system or a table in a catalog.
-
----++++ File System Storage
-
-<verbatim>
-        <clusters>
-        <cluster name="test-cluster">
-            <validity start="2012-07-20T03:00Z" end="2099-07-16T00:00Z"/>
-            <retention limit="days(10)" action="delete"/>
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-            <locations>
-                <location type="data" path="/hdfsDataLocation/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}"/>
-                <location type="stats" path="/projects/falcon/clicksStats" />
-                <location type="meta" path="/projects/falcon/clicksMetaData" />
-            </locations>
-        </cluster>
-..... more clusters </clusters>
-</verbatim>
-A feed references a cluster by its name; before submitting a feed, all the referenced clusters should be submitted to Falcon.
-type: specifies whether the referenced cluster should be treated as a source or target for a feed. A feed can have multiple source and target clusters. If the type of cluster is not specified then the cluster is not considered for replication.
-Validity of a feed on a cluster specifies the duration for which this feed is valid on this cluster.
-Retention specifies how long the feed is retained on this cluster and the action to be taken on the feed after the expiry of the retention period.
-The retention limit is specified by the expression frequency(times), ex: if the feed should be retained for at least 6 hours then retention's limit="hours(6)".
-The field partitionExp contains partition tags. The number of partition tags has to be equal to the number of partitions specified in the feed schema. A partition tag can be a wildcard(*), a static string or an expression. At least one of the strings has to be an expression.
-sla specifies the sla for the feed on this cluster. This is an optional parameter and the sla can be the same as or different from the
-global sla tag (mentioned outside the clusters tag). This tag gives the user the flexibility to have
-different slas for different clusters e.g. in case of replication. If this attribute is missing then the default global
-sla is picked from the feed definition.
-Location specifies where the feed is available on this cluster. This is an optional parameter and the path can be the same as or different from the global locations tag value (mentioned outside the clusters tag). This tag gives the user the flexibility to have the feed at different locations on different clusters. If this attribute is missing then the default global location is picked from the feed definition. Also, the individual location tags data, stats, meta are optional.
-<verbatim>
- <location type="data" path="/projects/falcon/clicks" />
- <location type="stats" path="/projects/falcon/clicksStats" />
- <location type="meta" path="/projects/falcon/clicksMetaData" />
-</verbatim>
-A location tag specifies the type of location like data, meta, stats and the corresponding paths for them.
-A feed should at least define the location for type data, which specifies the HDFS path pattern where the feed is generated
-periodically. ex: type="data" path="/projects/TrafficHourly/${YEAR}-${MONTH}-${DAY}/traffic"
-The granularity of the date pattern in the path should be at least that of the frequency of the feed.
-Other location types which are supported are stats and meta paths; if a process references a feed then the meta and stats
-paths are available as properties in the process.
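-
-For illustration, a hedged sketch of a feed's clusters section showing source and target clusters with the type
-and partition attributes discussed above (cluster names, paths and the partition expression are hypothetical):
-<verbatim>
-    <clusters>
-        <cluster name="primary-cluster" type="source" partition="${cluster.colo}/US">
-            <validity start="2012-07-20T03:00Z" end="2099-07-16T00:00Z"/>
-            <retention limit="days(10)" action="delete"/>
-            <sla slaLow="hours(3)" slaHigh="hours(4)"/>
-        </cluster>
-        <cluster name="backup-cluster" type="target">
-            <validity start="2012-07-20T03:00Z" end="2099-07-16T00:00Z"/>
-            <retention limit="days(30)" action="delete"/>
-        </cluster>
-    </clusters>
-</verbatim>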
-
----++++ Catalog Storage (Table)
-
-A table tag specifies the table URI in the catalog registry as:
-<verbatim>
-catalog:$database-name:$table-name#(partition-key=partition-value);(partition-key=partition-value);*
-</verbatim>
-
-This is modeled as a URI (similar to an ISBN URI). It does not have any reference to Hive or HCatalog. It's quite
-generic so it can be tied to other implementations of a catalog registry. The catalog implementation specified
-in the startup config provides the implementation for the catalog URI.
-
-Top-level partition has to be a dated pattern and the granularity of date pattern should be at least that
-of a frequency of a feed.
-
-<verbatim>
-    <xs:complexType name="catalog-table">
-        <xs:annotation>
-            <xs:documentation>
-                catalog specifies the uri of a Hive table along with the partition spec.
-                uri="catalog:$database:$table#(partition-key=partition-value);+"
-                Example: catalog:logs-db:clicks#ds=${YEAR}-${MONTH}-${DAY}
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="uri" use="required"/>
-    </xs:complexType>
-</verbatim>
-
-Examples:
-<verbatim>
-<table uri="catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR};region=${region}" />
-<table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-<table uri="catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-</verbatim>
-
----+++ Partitions
-
-<verbatim>
-   <partitions>
-        <partition name="country" />
-        <partition name="cluster" />
-    </partitions>
-</verbatim>
-A feed can define multiple partitions; if a referenced cluster defines partitions then the number of partitions in the feed has to be equal to or more than the cluster partitions.
-
-*Note:* This will only apply for !FileSystem storage but not Table storage as partitions are defined and maintained in
-Hive (HCatalog) registry.
-
----+++ Groups
-
-<verbatim>
-    <groups>online,bi</groups>
-</verbatim>
-A feed specifies a list of comma separated groups, a group is a logical grouping of feeds and a group is said to be
-available if all the feeds belonging to a group are available. The frequency of all the feeds which belong to the same group
-must be the same.
-
----+++ Availability Flags
-
-<verbatim>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
-</verbatim>
-An availabilityFlag specifies the name of a file which, when present/created in a feed's data directory,
-marks the feed as available. ex: _SUCCESS. If this element is omitted then Falcon would consider the presence of the feed's
-data directory as feed availability.
-
----+++ Frequency
-
-<verbatim>
-    <frequency>minutes(20)</frequency>
-</verbatim>
-A feed has a frequency which specifies the frequency by which this feed is generated.
-ex: it can be generated every hour, every 5 minutes, daily, weekly etc.
-Valid frequency types for a feed are minutes, hours, days and months. The value should be a positive integer.
-
----+++ SLA
-<verbatim>
-    <sla slaLow="hours(40)" slaHigh="hours(44)" />
-</verbatim>
-
-A feed can have an SLA and each SLA has two properties - slaLow and slaHigh. Both slaLow and slaHigh are written using
-expressions like frequency. slaLow is intended for alerting on feed instances which are in danger of missing their
-availability SLAs. slaHigh is intended for reporting the feed instances which missed their SLAs. SLAs are relative to the
-feed instance time.
-
----+++ Import
-
-<verbatim>
-<import>
-    <source name="test-hsql-db" tableName="customer">
-        <extract type="full">
-            <mergepolicy>snapshot</mergepolicy>
-         </extract>
-         <fields>
-            <includes>
-                <field>id</field>
-                <field>name</field>
-            </includes>
-         </fields>
-    </source>
-    <arguments>
-        <argument name="--split-by" value="id"/>
-        <argument name="--num-mappers" value="2"/>
-    </arguments>
-</import>
-</verbatim>
-
-A feed can have an import policy associated with it. The source name specifies the reference to the
-datasource entity from which the data will be imported to HDFS. The tableName specifies the table or topic to be
-imported from the datasource. The extract type specifies the pull mechanism (full or
-incremental extract). The full extract method extracts all the data from the datasource. The incremental extraction
-method is still being implemented. The mergepolicy determines how the data is to be laid out on HDFS.
-The snapshot layout creates a snapshot of the data on HDFS using the feed's location specification. Fields is used
-to specify the projection columns. Feed import from a database uses Sqoop underneath to achieve the task. Any advanced
-Sqoop options can be specified via the arguments.
-
----+++ Late Arrival
-
-<verbatim>
-    <late-arrival cut-off="hours(6)" />
-</verbatim>
-A late-arrival specifies the cut-off period till which the feed is expected to arrive late and should be honored by processes referring to it as an input feed, by rerunning the instances in case the data arrives late within the cut-off period.
-The cut-off period is specified by the expression frequency(times), ex: if the feed can arrive late
-up to 8 hours then late-arrival's cut-off="hours(8)"
-
-*Note:* This will only apply for !FileSystem storage but not Table storage until a future time.
-
-
----+++ Email Notification
-
-<verbatim>
-    <notification type="email" to="bob@xyz.com"/>
-</verbatim>
-Specifying the notification element with "type" property allows users to receive email notification when a scheduled feed instance completes.
-Multiple recipients of an email can be provided as comma separated addresses with "to" property.
-To send email notification ensure that SMTP parameters are defined in Falcon startup.properties.
-Refer to [[FalconEmailNotification][Falcon Email Notification]] for more details.
-
-
----+++ ACL
-
-A feed has ACL (Access Control List) useful for implementing permission requirements
-and provide a way to set different permissions for specific users or named groups.
-<verbatim>
-    <ACL owner="test-user" group="test-group" permission="*"/>
-</verbatim>
-ACL indicates the Access control list for this feed.
-owner is the Owner of this entity.
-group is the one which has access to read.
-permission indicates the permission.
-
----+++ Custom Properties
-
-<verbatim>
-    <properties>
-        <property name="tmpFeedPath" value="tmpFeedPathValue" />
-        <property name="field2" value="value2" />
-        <property name="queueName" value="hadoopQueue"/>
-        <property name="jobPriority" value="VERY_HIGH"/>
-        <property name="timeout" value="hours(1)"/>
-        <property name="parallel" value="3"/>
-        <property name="maxMaps" value="8"/>
-        <property name="mapBandwidth" value="1"/>
-        <property name="overwrite" value="true"/>
-        <property name="ignoreErrors" value="false"/>
-        <property name="skipChecksum" value="false"/>
-        <property name="removeDeletedFiles" value="true"/>
-        <property name="preserveBlockSize" value="true"/>
-        <property name="preserveReplicationNumber" value="true"/>
-        <property name="preservePermission" value="true"/>
-        <property name="order" value="LIFO"/>
-    </properties>
-</verbatim>
-These are key-value pairs which are propagated to the workflow engine. "queueName" and "jobPriority" are special properties
-available to the user to specify the Hadoop job queue and priority; the same values are used by Falcon's launcher job.
-"timeout", "parallel" and "order" are other special properties: timeout decides the replication instance's timeout value while
-waiting for the feed instance, parallel decides the concurrent replication instances that can run at any given time and
-order decides the execution order for replication instances like FIFO, LIFO and LAST_ONLY.
-DistCp options can be passed as custom properties, which will be propagated to the DistCp tool. "maxMaps" represents
-the maximum number of maps used during replication. "mapBandwidth" represents the bandwidth in MB/s
-used by each mapper during replication. "overwrite" represents overwriting the destination during replication.
-"ignoreErrors" represents ignoring failures without causing the job to fail during replication. "skipChecksum" represents
-bypassing checksum verification during replication. "removeDeletedFiles" represents deleting the files existing in the
-destination but not in the source during replication. "preserveBlockSize" represents preserving the block size during
-replication. "preserveReplicationNumber" represents preserving the replication number during replication.
-"preservePermission" represents preserving permissions during replication.
-
-
----+++ Lifecycle
-<verbatim>
-
-<lifecycle>
-    <retention-stage>
-        <frequency>hours(10)</frequency>
-        <queue>reports</queue>
-        <priority>NORMAL</priority>
-        <properties>
-            <property name="retention.policy.agebaseddelete.limit" value="hours(9)"></property>
-        </properties>
-    </retention-stage>
-</lifecycle>
-
-</verbatim>
-
-The lifecycle tag is the new way to define various stages of a feed's lifecycle. In the example above we have defined a
-retention-stage using the lifecycle tag. You may define lifecycle at the global level, at a cluster level, or both. Cluster-level
-configuration takes precedence and falcon falls back to the global definition if the cluster-level specification is missing.
-
-
----++++ Retention Stage
-As of now there are two ways to specify retention. One is through the <retention> tag in the cluster and another is the
-new way through the <retention-stage> tag in the <lifecycle> tag. If both are defined for a feed, then the lifecycle tag will be
-considered effective and falcon will ignore the <retention> tag in the cluster. If there is an invalid configuration of
-retention-stage in the lifecycle tag, then falcon will *NOT* fall back to the retention tag even if it is defined and will
-throw a validation error.
-
-In this new method of defining retention you can specify the frequency at which the retention should occur, and you can
-also define the queue and priority parameters for retention jobs. The default behavior of retention-stage is the same as
-the existing one, which is to delete all instances corresponding to an instance-time earlier than the duration provided in
-"retention.policy.agebaseddelete.limit".
-
-Property "retention.policy.agebaseddelete.limit" is a mandatory property and must contain a valid duration e.g. "hours(1)".
-Retention frequency is not a mandatory parameter. If the user doesn't specify the frequency in the retention stage then
-it doesn't fall back to the old retention policy frequency. Its default value is set to 6 hours if the feed frequency is less
-than 6 hours, else it is set to the feed frequency, as retention shouldn't be more frequent than data availability to avoid
-wastage of compute resources.
-
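-For illustration, a hedged sketch of overriding the retention-stage at the cluster level (the text above states that
-cluster-level lifecycle configuration takes precedence over the global definition; names and values here are hypothetical):
-<verbatim>
-<clusters>
-    <cluster name="test-cluster">
-        <validity start="2012-07-20T03:00Z" end="2099-07-16T00:00Z"/>
-        <lifecycle>
-            <retention-stage>
-                <frequency>hours(12)</frequency>
-                <properties>
-                    <property name="retention.policy.agebaseddelete.limit" value="hours(24)"/>
-                </properties>
-            </retention-stage>
-        </lifecycle>
-    </cluster>
-</clusters>
-</verbatim>
-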
-In future, we will allow more customisation like customising how to choose instances to be deleted through this method.
-
-
-
----++ Process Specification
-A process defines configuration for a workflow. A workflow is a directed acyclic graph(DAG) which defines the job for the workflow engine. A process definition defines  the configurations required to run the workflow job. For example, process defines the frequency at which the workflow should run, the clusters on which the workflow should run, the inputs and outputs for the workflow, how the workflow failures should be handled, how the late inputs should be handled and so on.  
-
-The different details of process are:
----+++ Name
-Each process is identified with a unique name.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-</process>
-</verbatim>
-
----+++ Tags
-An optional list of comma separated tags which are used for classification of processes.
-Syntax:
-<verbatim>
-...
-    <tags>consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting</tags>
-</verbatim>
-
----+++ Pipelines
-An optional list of comma separated word strings, specifies the data processing pipeline(s) to which this process belongs.
-Only letters, numbers and underscore are allowed for pipeline string.
-Syntax:
-<verbatim>
-...
-    <pipelines>test_Pipeline, dataReplication, clickStream_pipeline</pipelines>
-</verbatim>
-
----+++ Cluster
-The cluster on which the workflow should run. A process should contain one or more clusters. The cluster definition for the cluster name gives the end points for workflow execution, name node, job tracker, messaging and so on. Each cluster in turn has a validity mentioned, which tells the times between which the job should run on that specified cluster.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-   <clusters>
-        <cluster name="test-cluster1">
-            <validity start="2012-12-21T08:15Z" end="2100-01-01T00:00Z"/>
-        </cluster>
-        <cluster name="test-cluster2">
-            <validity start="2012-12-21T08:15Z" end="2100-01-01T00:00Z"/>
-        </cluster>
-       ....
-       ....
-    </clusters>
-
-...
-</process>
-</verbatim>
-
----+++ Parallel
-Parallel defines how many instances of the workflow can run concurrently. It should be a positive integer > 0.
-For example, parallel of 1 ensures that only one instance of the workflow can run at a time. The next instance will start only after the running instance completes.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-   <parallel>[parallel]</parallel>
-...
-</process>
-</verbatim>
-
----+++ Order
-Order defines the order in which the ready instances are picked up. The possible values are FIFO(First In First Out), LIFO(Last In First Out), and LAST_ONLY(Last Only).
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-   <order>[order]</order>
-...
-</process>
-</verbatim>
-
----+++ Timeout
-An optional Timeout specifies the maximum time an instance waits for a dataset before being killed by the workflow engine; a timeout is specified like frequency.
-If timeout is not specified, falcon computes a default timeout for a process based on its frequency, which is six times the frequency of the process, or 30 minutes if the computed timeout is less than 30 minutes.
-<verbatim>
-<process name="[process name]">
-...
-   <timeout>[timeunit]([frequency])</timeout>
-...
-</process>
-</verbatim>
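-
-For example, a hedged sketch of an hourly process with an explicit timeout (with frequency hours(1) and no explicit
-timeout, the default computed as described above would be hours(6); the values here are illustrative):
-<verbatim>
-<process name="sample-process">
-...
-    <frequency>hours(1)</frequency>
-    <timeout>hours(2)</timeout>
-...
-</process>
-</verbatim>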
-
----+++ Frequency
-Frequency defines how frequently the workflow job should run. For example, hours(1) defines the frequency as hourly, days(7) defines weekly frequency. The values for timeunit can be minutes/hours/days/months and the frequency number should be a positive integer > 0. 
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-   <frequency>[timeunit]([frequency])</frequency>
-...
-</process>
-</verbatim>
-
----+++ SLA
-<verbatim>
-    <sla shouldStartIn="hours(2)" shouldEndIn="hours(4)"/>
-</verbatim>
-A process can have SLA which is defined by 2 optional attributes - shouldStartIn and shouldEndIn. All the attributes
-are written using expressions like frequency. shouldStartIn is the time by which the process should have started.
-shouldEndIn is the time by which the process should have finished.
-
-
----+++ Validity
-Validity defines how long the workflow should run. It has 3 components - start time, end time and timezone. Start time and end time are timestamps defined in yyyy-MM-dd'T'HH:mm'Z' format and should always be in UTC. Timezone is used to compute the next instances starting from start time. The workflow will start at start time and end before end time specified on a given cluster. So, there will not be a workflow instance at end time.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-   <validity start=[start time] end=[end time] timezone=[timezone]/>
-...
-</process>
-</verbatim>
-
-Examples:
-<verbatim>
-<process name="sample-process">
-...
-    <frequency>days(1)</frequency>
-    <validity start="2012-01-01T00:40Z" end="2012-04-01T00:00" timezone="UTC"/>
-...
-</process>
-</verbatim>
-The daily workflow will start on Jan 1st 2012 at 00:40 UTC, it will run at 00:40 UTC every day, and the last instance will be on March 31st 2012 at 00:40 UTC.
-                                                                                               
-<verbatim>
-<process name="sample-process">
-...
-    <frequency>hours(1)</frequency>
-    <validity start="2012-03-11T08:40Z" end="2012-03-12T08:00" timezone="PST8PDT"/>
-...
-</process>
-</verbatim>
-The hourly workflow will start on March 11th 2012 at 00:40 PST, the next instances will be at 01:40 PST, 03:40 PDT, 04:40 PDT and so on till 23:40 PDT. So, there will be just 23 instances of the workflow for March 11th 2012 because of DST switch.
-
----+++ Inputs
-Inputs define the input data for the workflow. The workflow job will start executing only after the schedule time and when all the inputs are available. There can be 0 or more inputs and each of the inputs maps to a feed. The path and frequency of the input data are picked up from the feed definition. Each input should also define start and end instances in terms of [[FalconDocumentation][EL expressions]] and can optionally specify the specific partition of the input that the workflow requires. The components in the partition should be a subset of the partitions defined in the feed.
-
-For each input, Falcon will create a property with the input name that contains the comma separated list of input paths. This property can be used in workflow actions like pig scripts and so on.
-
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-    <inputs>
-        <input name=[input name] feed=[feed name] start=[start el] end=[end el] partition=[partition]/>
-        ...
-    </inputs>
-...
-</process>
-</verbatim>
-
-Example:
-<verbatim>
-<feed name="feed1">
-...
-    <partition name="isFraud"/>
-    <partition name="country"/>
-    <frequency>hours(1)</frequency>
-    <locations>
-        <location type="data" path="/projects/bootcamp/feed1/${YEAR}-${MONTH}-${DAY}-${HOUR}"/>
-        ...
-    </locations>
-...
-</feed>
-<process name="sample-process">
-...
-    <inputs>
-        <input name="input1" feed="feed1" start="today(0,0)" end="today(1,0)" partition="*/US"/>
-        ...
-    </inputs>
-...
-</process>
-</verbatim>
-The input for the workflow is an hourly feed and takes the 0th and 1st hour data of today (the day when the workflow runs).
-If the workflow is running for 2012-03-01T06:40Z, the inputs are /projects/bootcamp/feed1/2012-03-01-00/*/US and
-/projects/bootcamp/feed1/2012-03-01-01/*/US. The property for this input is
-input1=/projects/bootcamp/feed1/2012-03-01-00/*/US,/projects/bootcamp/feed1/2012-03-01-01/*/US
-
-Also, feeds with Hive table storage can be used as inputs to a process. Several parameters from inputs are passed as
-params to the user workflow or pig script.
-
-<verbatim>
-    ${wf:conf('falcon_input_database')} - database name associated with the feed for a given input
-    ${wf:conf('falcon_input_table')} - table name associated with the feed for a given input
-    ${wf:conf('falcon_input_catalog_url')} - Hive metastore URI for this input feed
-    ${wf:conf('falcon_input_partition_filter_pig')} - value of ${coord:dataInPartitionFilter('$input', 'pig')}
-    ${wf:conf('falcon_input_partition_filter_hive')} - value of ${coord:dataInPartitionFilter('$input', 'hive')}
-    ${wf:conf('falcon_input_partition_filter_java')} - value of ${coord:dataInPartitionFilter('$input', 'java')}
-</verbatim>
-
-*NOTE:* input is the name of the input configured in the process, which is input.getName().
-<verbatim><input name="input" feed="clicks-raw-table" start="yesterday(0,0)" end="yesterday(20,0)"/></verbatim>
-
-Example workflow configuration:
-
-<verbatim>
-<configuration>
-  <property>
-    <name>falcon_input_database</name>
-    <value>falcon_db</value>
-  </property>
-  <property>
-    <name>falcon_input_table</name>
-    <value>input_table</value>
-  </property>
-  <property>
-    <name>falcon_input_catalog_url</name>
-    <value>thrift://localhost:29083</value>
-  </property>
-  <property>
-    <name>falcon_input_storage_type</name>
-    <value>TABLE</value>
-  </property>
-  <property>
-    <name>feedInstancePaths</name>
-    <value>hcat://localhost:29083/falcon_db/output_table/ds=2012-04-21-00</value>
-  </property>
-  <property>
-    <name>falcon_input_partition_filter_java</name>
-    <value>(ds='2012-04-21-00')</value>
-  </property>
-  <property>
-    <name>falcon_input_partition_filter_hive</name>
-    <value>(ds='2012-04-21-00')</value>
-  </property>
-  <property>
-    <name>falcon_input_partition_filter_pig</name>
-    <value>(ds=='2012-04-21-00')</value>
-  </property>
-  ...
-</configuration>
-</verbatim>
-
-
----+++ Optional Inputs
-Users can mention one or more inputs as optional inputs. In such cases the job does not wait on those inputs which are
-mentioned as optional. If they are present it considers them, otherwise it continues with the compulsory ones.
-Example:
-<verbatim>
-<feed name="feed1">
-...
-    <partition name="isFraud"/>
-    <partition name="country"/>
-    <frequency>hours(1)</frequency>
-    <locations>
-        <location type="data" path="/projects/bootcamp/feed1/${YEAR}-${MONTH}-${DAY}-${HOUR}"/>
-        ...
-    </locations>
-...
-</feed>
-<process name="sample-process">
-...
-    <inputs>
-        <input name="input1" feed="feed1" start="today(0,0)" end="today(1,0)" partition="*/US"/>
-        <input name="input2" feed="feed2" start="today(0,0)" end="today(1,0)" partition="*/UK" optional="true" />
-        ...
-    </inputs>
-...
-</process>
-</verbatim>
-
-*Note:* This is only supported for !FileSystem storage but not Table storage at this point.
-
-
----+++ Outputs
-Outputs define the output data that is generated by the workflow. A process can define 0 or more outputs. Each output is mapped to a feed and the output path is picked up from feed definition. The output instance that should be generated is specified in terms of [[FalconDocumentation][EL expression]].
-
-For each output, Falcon creates a property with the output name that contains the path of the output data. This can be used in workflows to store data in that path.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-    <outputs>
-        <output name=[input name] feed=[feed name] instance=[instance el]/>
-        ...
-    </outputs>
-...
-</process>
-</verbatim>
-
-Example:
-<verbatim>
-<feed name="feed2">
-...
-    <frequency>days(1)</frequency>
-    <locations>
-        <location type="data" path="/projects/bootcamp/feed2/${YEAR}-${MONTH}-${DAY}"/>
-        ...
-    </locations>
-...
-</feed>
-<process name="sample-process">
-...
-    <outputs>
-        <output name="output1" feed="feed2" instance="today(0,0)"/>
-        ...
-    </outputs>
-...
-</process>
-</verbatim>
-The output of the workflow is feed instance for today. If the workflow is running for 2012-03-01T06:40Z,
-the workflow generates output /projects/bootcamp/feed2/2012-03-01. The property for this output that is available
-for workflow is: output1=/projects/bootcamp/feed2/2012-03-01
-
-Also, feeds with Hive table storage can be used as outputs to a process. Several parameters from outputs are passed as
-params to the user workflow or pig script.
-<verbatim>
-    ${wf:conf('falcon_output_database')} - database name associated with the feed for a given output
-    ${wf:conf('falcon_output_table')} - table name associated with the feed for a given output
-    ${wf:conf('falcon_output_catalog_url')} - Hive metastore URI for the given output feed
-    ${wf:conf('falcon_output_dataout_partitions')} - value of ${coord:dataOutPartitions('$output')}
-</verbatim>
-
-*NOTE:* output is the name of the output configured in the process, which is output.getName().
-<verbatim><output name="output" feed="clicks-summary-table" instance="today(0,0)"/></verbatim>
-
-Example workflow configuration:
-
-<verbatim>
-<configuration>
-  <property>
-    <name>falcon_output_database</name>
-    <value>falcon_db</value>
-  </property>
-  <property>
-    <name>falcon_output_table</name>
-    <value>output_table</value>
-  </property>
-  <property>
-    <name>falcon_output_catalog_url</name>
-    <value>thrift://localhost:29083</value>
-  </property>
-  <property>
-    <name>falcon_output_storage_type</name>
-    <value>TABLE</value>
-  </property>
-  <property>
-    <name>feedInstancePaths</name>
-    <value>hcat://localhost:29083/falcon_db/output_table/ds=2012-04-21-00</value>
-  </property>
-  <property>
-    <name>falcon_output_dataout_partitions</name>
-    <value>'ds=2012-04-21-00'</value>
-  </property>
-  ....
-</configuration>
-</verbatim>
-
----+++ Custom Properties
-The properties are key value pairs that are passed to the workflow. These properties are optional and can be used
-in workflow to parameterize the workflow.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-    <properties>
-        <property name=[key] value=[value]/>
-        ...
-    </properties>
-...
-</process>
-</verbatim>
-
-The following are some special properties, which when present are used by Falcon's launcher job; the same properties are also available in the workflow and can be used to propagate to the pig or M/R job.
-<verbatim>
-        <property name="queueName" value="hadoopQueue"/>
-        <property name="jobPriority" value="VERY_HIGH"/>
-        <!-- This property is used to turn off JMS notifications for this process. JMS notifications are enabled by default. -->
-        <property name="userJMSNotificationEnabled" value="false"/>
-</verbatim>
-
----+++ Workflow
-
-The workflow defines the workflow engine that should be used and the path to the workflow on hdfs.
-Libraries required can be specified using lib attribute in the workflow element and will be comma separated HDFS paths.
-The workflow definition on hdfs contains the actual job that should run and it should conform to
-the workflow specification of the engine specified. The libraries required by the workflow should
-be in the lib folder inside the workflow path.
-
-The properties defined in the cluster and cluster properties(nameNode and jobTracker) will also
-be available for the workflow.
-
-There are 3 engines supported today.
-
----++++ Oozie
-
-As part of oozie workflow engine support, users can embed an oozie workflow.
-Refer to oozie [[http://oozie.apache.org/docs/4.0.1/DG_Overview.html][workflow overview]] and
-[[http://oozie.apache.org/docs/4.0.1/WorkflowFunctionalSpec.html][workflow specification]] for details.
-
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-    <workflow engine=[workflow engine] path=[workflow path] lib=[comma separated lib paths]/>
-...
-</process>
-</verbatim>
-
-Example:
-<verbatim>
-<process name="sample-process">
-...
-    <workflow engine="oozie" path="/projects/bootcamp/workflow"/>
-...
-</process>
-</verbatim>
-
-This defines the workflow engine to be oozie and the workflow xml is defined at
-/projects/bootcamp/workflow/workflow.xml. The libraries are at /projects/bootcamp/workflow/lib.
-Libraries path can be overridden using lib attribute. e.g.: lib="/projects/bootcamp/wf/libs,/projects/bootcamp/oozie/libs" in the workflow element.
-
----++++ Pig
-
-Falcon also adds the Pig engine which enables users to embed a Pig script as a process.
-
-Example:
-<verbatim>
-<process name="sample-process">
-...
-    <workflow engine="pig" path="/projects/bootcamp/pig.script" lib="/projects/bootcamp/wf/libs,/projects/bootcamp/pig/libs"/>
-...
-</process>
-</verbatim>
-
-This defines the workflow engine to be pig and the pig script is defined at
-/projects/bootcamp/pig.script.
-
-Feeds with Hive table storage will send one more parameter apart from the general ones:
-<verbatim>$input_filter</verbatim>
-
----++++ Hive
-
-Falcon also adds the Hive engine as part of Hive Integration which enables users to embed a Hive script as a process.
-This would enable users to create materialized queries in a declarative way.
-
-Example:
-<verbatim>
-<process name="sample-process">
-...
-    <workflow engine="hive" path="/projects/bootcamp/hive-script.hql"/>
-...
-</process>
-</verbatim>
-
-This defines the workflow engine to be hive and the hive script is defined at
-/projects/bootcamp/hive-script.hql.
-
-Feeds with Hive table storage will send one more parameter apart from the general ones:
-<verbatim>$input_filter</verbatim>
-
----+++ Retry
-Retry policy defines how the workflow failures should be handled. Three retry policies are defined: periodic, exp-backoff(exponential backoff) and final. Depending on the delay and number of attempts, the workflow is re-tried after specific intervals. If the user sets the onTimeout attribute to "true", retries will happen for TIMED_OUT instances.
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-    <retry policy=[retry policy] delay=[retry delay] attempts=[retry attempts] onTimeout=[retry onTimeout]/>
-...
-</process>
-</verbatim>
-
-Examples:
-<verbatim>
-<process name="sample-process">
-...
-    <retry policy="periodic" delay="minutes(10)" attempts="3" onTimeout="true"/>
-...
-</process>
-</verbatim>
-The workflow is re-tried after 10 mins, 20 mins and 30 mins. With exponential backoff, the workflow will be re-tried after 10 mins, 20 mins and 40 mins.
-
-*NOTE :* If user does a manual rerun with -force option (using the instance rerun API), then the runId will get reset and user might see more Falcon system retries than configured in the process definition.
-
-To enable retries for feed instances, the user will have to set the following properties in runtime.properties
-<verbatim>
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-</verbatim>
-
----+++ Late data
-Late data handling defines how the late data should be handled. Each feed is defined with a late cut-off value which specifies the time till which late data is valid. For example, a late cut-off of hours(6) means that data for the nth hour can get delayed by up to 6 hours. The late data specification in the process defines how this late data is handled.
-
-Late data policy defines how frequently a check is done to detect late data. The policies supported are: backoff, exp-backoff(exponential backoff) and final(at feed's late cut-off). The policy along with delay defines the interval at which the late data check is done.
-
-Late input specification for each input defines the workflow that should run when late data is detected for that input. 
-
-Syntax:
-<verbatim>
-<process name="[process name]">
-...
-    <late-process policy=[late handling policy] delay=[delay]>
-        <late-input input=[input name] workflow-path=[workflow path]/>
-        ...
-    </late-process>
-...
-</process>
-</verbatim>
-
-Example:
-<verbatim>
-<feed name="feed1">
-...
-    <frequency>hours(1)</frequency>
-    <late-arrival cut-off="hours(6)"/>
-...
-</feed>
-<process name="sample-process">
-...
-    <inputs>
-        <input name="input1" feed="feed1" start="today(0,0)" end="today(1,0)"/>
-        ...
-    </inputs>
-    <late-process policy="final">
-        <late-input input="input1" workflow-path="/projects/bootcamp/workflow/lateinput1" />
-        ...
-    </late-process>
-...
-</process>
-</verbatim>
-This late handling specifies that late data detection should run at feed's late cut-off which is 6 hours in this case. If there is late data, Falcon should run the workflow specified at /projects/bootcamp/workflow/lateinput1/workflow.xml
-
-*Note:* This is only supported for !FileSystem storage but not Table storage at this point.
-
----+++ Email Notification
-
-<verbatim>
-    <notification type="email" to="bob@@xyz.com"/>
-</verbatim>
-Specifying the notification element with "type" property allows users to receive email notification when a scheduled process instance completes.
-Multiple recipients of an email can be provided as comma separated addresses with "to" property.
-To send email notification ensure that SMTP parameters are defined in Falcon startup.properties.
-Refer to [[FalconEmailNotification][Falcon Email Notification]] for more details.
-
----+++ ACL
-
-A process has ACL (Access Control List) useful for implementing permission requirements
-and provide a way to set different permissions for specific users or named groups.
-<verbatim>
-    <ACL owner="test-user" group="test-group" permission="*"/>
-</verbatim>
-ACL indicates the Access control list for this process.
-owner is the Owner of this entity.
-group is the one which has access to read.
-permission indicates the permission.
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/FalconCLI.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/FalconCLI.twiki b/docs/src/site/twiki/FalconCLI.twiki
deleted file mode 100644
index 5395f12..0000000
--- a/docs/src/site/twiki/FalconCLI.twiki
+++ /dev/null
@@ -1,540 +0,0 @@
----+FalconCLI
-
-FalconCLI is an interface between the user and Falcon. It is a command line utility provided by Falcon. FalconCLI supports Entity Management, Instance Management and Admin operations. There is a set of web services that are used by FalconCLI to interact with Falcon.
-
----++Common CLI Options
-
----+++Falcon URL
-
-An optional -url option indicating the URL of the Falcon system to run the command against can be provided. If not mentioned it will be picked from the system environment variable FALCON_URL. If FALCON_URL is not set then it will be picked from the client.properties file. If the option is not
-provided and also not set in client.properties, the Falcon CLI will fail.
-
----+++Proxy user support
-
-The -doAs option allows the current user to impersonate other users when interacting with the Falcon system. The current user must be configured as a proxyuser in the Falcon system. The proxyuser configuration may restrict the hosts from
-which a user may impersonate other users, as well as the groups the impersonated users may belong to.
-
-<a href="./FalconDocumentation.html#Proxyuser_support">Proxyuser support described here.</a>
-
----+++Debug Mode
-
-If you export FALCON_DEBUG=true then the Falcon CLI will output the Web Services API details used by any commands you execute. This is useful for debugging purposes or to see how the Falcon CLI works with the WS API.
-Alternately, you can specify '-debug' through the CLI arguments to get the debug statements.
-Example:
-$FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml -debug
-
----++Entity Management Operations
-
----+++Submit
-
-The submit option is used to set up an entity definition.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -submit -type [cluster|datasource|feed|process] -file <entity-definition.xml>
-
-Example: 
-$FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml
-
-Note: The url option in the above and all subsequent commands is optional. If not mentioned it will be picked from client.properties file. If the option is not provided and also not set in client.properties, Falcon CLI will fail.
-
----+++Schedule
-
-Once submitted, an entity can be scheduled using the schedule option. Only a process or feed can be scheduled.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [process|feed] -name <<name>> -schedule
-
-Optional Arg : -skipDryRun -doAs <username>
--properties <<key1:val1,...,keyN:valN>>
-
-<a href="./Restapi/EntitySchedule.html">Optional params described here.</a>
-
-Example:
-$FALCON_HOME/bin/falcon entity  -type process -name sampleProcess -schedule
-
----+++Suspend
-
-Suspend on an entity results in suspension of the oozie bundle that was scheduled earlier through the schedule function. No further instances are executed on a suspended entity. Only schedule-able entities (process/feed) can be suspended.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -suspend
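-
-Example (the entity name is illustrative):
-$FALCON_HOME/bin/falcon entity -type process -name sampleProcess -suspend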
-
----+++Resume
-
-Puts a suspended process/feed back to active, which in turn resumes applicable oozie bundle.
-
-Usage:
- $FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -resume
-
----+++Delete
-
-Delete removes the submitted entity definition for the specified entity and puts it into the archive.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [cluster|datasource|feed|process] -name <<name>> -delete
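-
-Example (the entity name is illustrative):
-$FALCON_HOME/bin/falcon entity -type feed -name sampleFeed -delete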
-
----+++List
-
-Entities of a particular type can be listed with the list sub-command.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -list
-
-Optional Args : -fields <<field1,field2>>
--type <<[cluster|datasource|feed|process],[cluster|datasource|feed|process]>>
--nameseq <<namesubsequence>> -tagkeys <<tagkeyword1,tagkeyword2>>
--filterBy <<field1:value1,field2:value2>> -tags <<tagkey=tagvalue,tagkey=tagvalue>>
--orderBy <<field>> -sortOrder <<sortOrder>> -offset 0 -numResults 10
-
-<a href="./Restapi/EntityList.html">Optional params described here.</a>
-
-
----+++Summary
-
-A summary of entities of a particular type and a cluster will be listed. The entity summary has the N most recent instances of each entity.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [feed|process] -summary
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -fields <<field1,field2>>
--filterBy <<field1:value1,field2:value2>> -tags <<tagkey=tagvalue,tagkey=tagvalue>>
--orderBy <<field>> -sortOrder <<sortOrder>> -offset 0 -numResults 10 -numInstances 7
-
-<a href="./Restapi/EntitySummary.html">Optional params described here.</a>
-
----+++Update
-
-Update operation allows an already submitted/scheduled entity to be updated. Cluster and datasource updates are
-currently not allowed.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -update -file <<path_to_file>>
-
-Optional Arg : -skipDryRun. When this argument is specified, Falcon skips oozie dryrun.
-
-Example:
-$FALCON_HOME/bin/falcon entity -type process -name HourlyReportsGenerator -update -file /process/definition.xml
-
----+++Touch
-
-Force Update operation allows an already submitted/scheduled entity to be updated.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -touch
-
-Optional Arg : -skipDryRun. When this argument is specified, Falcon skips oozie dryrun.
-
----+++Status
-
-Status returns the current status of the entity.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [cluster|datasource|feed|process] -name <<name>> -status
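-
-Example (the entity name is illustrative):
-$FALCON_HOME/bin/falcon entity -type process -name sampleProcess -status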
-
----+++Dependency
-
-With the use of the dependency option, we can list all the entities on which the specified entity is dependent.
-For example, for a feed, dependency returns the cluster name, and for a process it returns all the input feeds,
-output feeds and cluster names.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [cluster|datasource|feed|process] -name <<name>> -dependency
-
----+++Definition
-
-Definition option returns the entity definition submitted earlier during submit step.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [cluster|datasource|feed|process] -name <<name>> -definition
-
-
----+++Lookup
-
-The lookup option tells you which feed a given path belongs to. This can be useful in several scenarios. For example, generally you would want to have a single definition for common feeds like metadata with the same location,
-otherwise it can result in a problem (different retention durations can result in surprises for one team). If you want to check whether there are multiple definitions of the same metadata, then you can pick
-an instance of it and run it through the lookup command like below.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type feed -lookup -path /data/projects/my-hourly/2014/10/10/23/
-
-If you have multiple feeds with location as /data/projects/my-hourly/${YEAR}/${MONTH}/${DAY}/${HOUR} then this command will return all of them.
-
-
----+++SLAAlert
-<verbatim>
-Since: 0.8
-</verbatim>
-
-This command lists all the feed instances which have missed their SLA and are still not available. If a feed instance missed
-its SLA but is now available, then it will not be reported in the results. The purpose of this API is alerting and hence it
-doesn't return feed instances which missed their SLA but are available, as they don't require any action.
-
-* Currently sla monitoring is supported only for feeds.
-
-* Option end is optional and will default to current time if missing.
-
-* Option name is optional, if provided only instances of that feed will be considered.
-
-Usage:
-
-*Example 1*
-
-*$FALCON_HOME/bin/falcon entity -type feed -start 2014-09-05T00:00Z -slaAlert  -end 2016-05-03T00:00Z -colo local*
-
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T11:59Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:00Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:01Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:02Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:03Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:04Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:05Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:06Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:07Z, tags: Missed SLA High
-name: out, type: FEED, cluster: local, instanceTime: 2015-09-26T12:08Z, tags: Missed SLA Low
-
-
-Response: default/Success!
-
-Request Id: default/216978070@qtp-830047511-4 - f5a6c129-ab42-4feb-a2bf-c3baed356248
-
-*Example 2*
-
-*$FALCON_HOME/bin/falcon entity -type feed -start 2014-09-05T00:00Z -slaAlert  -end 2016-05-03T00:00Z -colo local -name in*
-
-name: in, type: FEED, cluster: local, instanceTime: 2015-09-26T06:00Z, tags: Missed SLA High
-
-Response: default/Success!
-
-Request Id: default/1580107885@qtp-830047511-7 - f16cbc51-5070-4551-ad25-28f75e5e4cf2
-
-
----++Instance Management Options
-
----+++Kill
-
-Kill sub-command is used to kill all the instances of the specified process whose nominal time is between the given start time and end time.
-
-Note: 
-1. The start time and end time need to be specified in TZ format.
-Example:   01 Jan 2012 01:00  => 2012-01-01T01:00Z
-
-2. The entity name is a compulsory parameter for each instance management command.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -kill -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
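-
-Example (the entity name and time range are illustrative):
-$FALCON_HOME/bin/falcon instance -type process -name sampleProcess -kill -start "2012-01-01T01:00Z" -end "2012-01-01T04:00Z"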
-
----+++Suspend
-
-Suspend is used to suspend an instance or instances of the given process. This option pauses the parent workflow at the state it was in at the time this command was executed.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -suspend -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-
----+++Continue
-
-Continue option is used to continue a failed workflow instance. This option is valid only for process instances in a terminal state, i.e. KILLED or FAILED.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -continue -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-
----+++Rerun
-
-Rerun option is used to rerun instances of a given process. On issuing a rerun, by default the execution resumes from the last failed node in the workflow. This option is valid only for process instances in a terminal state, i.e. SUCCEEDED, KILLED or FAILED.
-If you want to forcefully rerun the entire workflow, -force should be passed along with -rerun.
-Additionally, you can specify properties to override via a properties file; these take precedence over the force option in case of contradiction.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -rerun -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" [-force] [-file <<properties file>>]
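-
-For illustration (the process name and times are hypothetical), forcing a full rerun of the workflow might look like:
-$FALCON_HOME/bin/falcon instance -type process -name sample-process -rerun -start "2012-01-01T01:00Z" -end "2012-01-01T02:00Z" -force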
-
----+++Resume
-
-Resume option is used to resume any instance that is in a suspended state.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -resume -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-
----+++Status
-
-Status option via CLI can be used to get the status of a single instance or of multiple instances. If an instance is not yet materialized but is within the process validity range, WAITING is returned as the state. Along with the status, the instance time is also returned. The log location gives the oozie workflow url.
-If the instance is in the WAITING state, missing dependencies are listed.
-The job urls are populated for all actions of the user workflow and for non-succeeded actions of the main workflow, so the user need not go to the underlying scheduler to get the job urls when debugging an issue in the job.
-
-Example : Suppose a process has 3 instances, where one has succeeded, one is running and the other is waiting; the expected output is:
-
-{"status":"SUCCEEDED","message":"getStatus is successful","instances":[{"instance":"2012-05-07T05:02Z","status":"SUCCEEDED","logFile":"http://oozie-dashboard-url"},{"instance":"2012-05-07T05:07Z","status":"RUNNING","logFile":"http://oozie-dashboard-url"}, {"instance":"2010-01-02T11:05Z","status":"WAITING"}]}
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -status
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -colo <<colo>>
--filterBy <<field1:value1,field2:value2>> -lifecycle <<lifecycles>>
--orderBy field -sortOrder <<sortOrder>> -offset 0 -numResults 10
--allAttempts To get all the attempts for corresponding instances
-
-<a href="./Restapi/InstanceStatus.html"> Optional params described here.</a>
-
----+++List
-
-List option via CLI can be used to get a single instance or multiple instances. If an instance is not yet materialized but is within the process validity range, WAITING is returned as the state. The instance time is also returned. The log location gives the oozie workflow url.
-If the instance is in the WAITING state, missing dependencies are listed.
-
-Example : Suppose a process has 3 instances, where one has succeeded, one is running and the other is waiting; the expected output is:
-
-{"status":"SUCCEEDED","message":"getStatus is successful","instances":[{"instance":"2012-05-07T05:02Z","status":"SUCCEEDED","logFile":"http://oozie-dashboard-url"},{"instance":"2012-05-07T05:07Z","status":"RUNNING","logFile":"http://oozie-dashboard-url"}, {"instance":"2010-01-02T11:05Z","status":"WAITING"}]}
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -list
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
--colo <<colo>> -lifecycle <<lifecycles>>
--filterBy <<field1:value1,field2:value2>> -orderBy field -sortOrder <<sortOrder>> -offset 0 -numResults 10
--allAttempts To get all the attempts for corresponding instances
-
-
-<a href="./Restapi/InstanceList.html">Optional params described here.</a>
-
----+++Summary
-
-Summary option via CLI can be used to get the consolidated status of the instances within the specified time period.
-Each status, along with the corresponding instance count, is listed for each of the applicable colos.
-The unscheduled instances within the specified time period are included as UNSCHEDULED in the output to provide more clarity.
-
-Example : Suppose a process has 3 instances, where one has succeeded, one is running and the other is waiting; the expected output is:
-
-{"status":"SUCCEEDED","message":"getSummary is successful", instancesSummary:[{"cluster": <<name>>, "map":[{"SUCCEEDED":"1"}, {"WAITING":"1"}, {"RUNNING":"1"}]}]}
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -summary
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -colo <<colo>>
--filterBy <<field1:value1,field2:value2>> -lifecycle <<lifecycles>>
--orderBy field -sortOrder <<sortOrder>>
-
-<a href="./Restapi/InstanceSummary.html">Optional params described here.</a>
-
----+++Running
-
-Running option lists all the currently running instances of the specified process.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -running
-
-Optional Args : -colo <<colo>> -lifecycle <<lifecycles>>
--filterBy <<field1:value1,field2:value2>> -orderBy <<field>> -sortOrder <<sortOrder>> -offset 0 -numResults 10
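-
-For illustration (the process name is hypothetical), the running instances could be fetched and paged as follows:
-$FALCON_HOME/bin/falcon instance -type process -name sample-process -running -offset 0 -numResults 5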
-
-<a href="./Restapi/InstanceRunning.html">Optional params described here.</a>
-
----+++FeedInstanceListing
-
-Get falcon feed instance availability.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type feed -name <<name>> -listing
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
--colo <<colo>>
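-
-For illustration (the feed name and dates are hypothetical), a one-day listing could be requested like this:
-$FALCON_HOME/bin/falcon instance -type feed -name sample-feed -listing -start "2016-01-01T00:00Z" -end "2016-01-02T00:00Z"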
-
-<a href="./Restapi/FeedInstanceListing.html">Optional params described here.</a>
-
----+++Logs
-
-Get logs for instance actions.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -logs
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -runid <<runid>>
--colo <<colo>> -lifecycle <<lifecycles>>
--filterBy <<field1:value1,field2:value2>> -orderBy field -sortOrder <<sortOrder>> -offset 0 -numResults 10
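-
-For illustration (the names, times and run id are hypothetical), fetching logs for a particular run might look like:
-$FALCON_HOME/bin/falcon instance -type process -name sample-process -logs -start "2012-01-01T01:00Z" -end "2012-01-01T02:00Z" -runid 0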
-
-<a href="./Restapi/InstanceLogs.html">Optional params described here.</a>
-
----+++LifeCycle
-
-Describes the list of life cycles of an entity: for a feed it can be replication/retention and for a process it can be execution.
-This can be used with the instance management options. Default values are replication for feed and execution for process.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -status -lifecycle <<lifecycletype>> -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
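-
-For illustration (the feed name is hypothetical, and REPLICATION is just one of the lifecycle values mentioned above), a lifecycle-specific status query might look like:
-$FALCON_HOME/bin/falcon instance -type feed -name sample-feed -status -lifecycle REPLICATION -start "2012-01-01T01:00Z" -end "2012-01-02T01:00Z"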
-
----+++Triage
-
-Given a feed/process instance, this command traces its ancestors to find which of them have failed. It's useful when
-a lot of instances are failing in a pipeline, as it helps find the root cause of the pipeline being stuck.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -triage -type <<feed/process>> -name <<name>> -start "yyyy-MM-dd'T'HH:mm'Z'"
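-
-For example, reusing the feed name and instance time from the dependency example further below (purely illustrative):
-$FALCON_HOME/bin/falcon instance -triage -type feed -name out -start "2014-12-15T00:00Z"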
-
----+++Params
-
-Displays the workflow params of a given instance. The start time is taken as the nominal time of that instance, and the end time is not considered.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -params -start "yyyy-MM-dd'T'HH:mm'Z'"
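-
-For example, using a hypothetical process name and taking the start time as the nominal instance time:
-$FALCON_HOME/bin/falcon instance -type process -name sample-process -params -start "2014-12-15T00:00Z"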
-
-
-
----+++Dependency
-Display the instances which are dependent on the given instance. For example, for a given process instance it will
-list all the input feed instances (if any) and the output feed instances (if any).
-
-An example use case of this command is as follows:
-Suppose you find out that the data in a feed instance was incorrect and you need to figure out which process instances
-consumed this feed instance so that you can reprocess them after correcting the feed instance. You can give the feed instance
-and it will tell you which process instance produced this feed and which process instances consumed it.
-
-NOTE:
-1. instanceTime must be a valid instanceTime, e.g. the instanceTime of a feed should be in its validity range on the applicable clusters,
-and it should be in the range of instances produced by the producer process (if any).
-
-2. For processes with inputs like latest() which vary with time, the results are not guaranteed to be correct.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -dependency -instanceTime "yyyy-MM-dd'T'HH:mm'Z'"
-
-For example:
-$FALCON_HOME/bin/falcon instance -dependency -type feed -name out -instanceTime 2014-12-15T00:00Z
-name: producer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:00Z, tags: Output
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:03Z, tags: Input
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:04Z, tags: Input
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:02Z, tags: Input
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:05Z, tags: Input
-
-
-Response: default/Success!
-
-Request Id: default/1125035965@qtp-503156953-7 - 447be0ad-1d38-4dce-b438-20f3de69b172
-
-
-<a href="./Restapi/InstanceDependency.html">Optional params described here.</a>
-
----++ Metadata Lineage Options
-
----+++Lineage
-
-Returns the relationship between processes and feeds in a given pipeline in <a href="http://www.graphviz.org/content/dot-language">dot</a> format.
-You can use the output to view a graphical representation of the DAG using an online graphviz viewer like <a href="http://graphviz-dev.appspot.com/">this</a>.
-
-
-Usage:
-
-$FALCON_HOME/bin/falcon metadata -lineage -pipeline my-pipeline
-
-pipeline is a mandatory option.
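-
-Since the output is plain dot text, it can also be redirected to a file and rendered locally, assuming a graphviz installation is available:
-$FALCON_HOME/bin/falcon metadata -lineage -pipeline my-pipeline > my-pipeline.dot
-dot -Tpng my-pipeline.dot -o my-pipeline.png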
-
-
-
----+++ Vertex
-
-Get the vertex with the specified id.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -vertex -id <<id>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -vertex -id 4
-
----+++ Vertices
-
-Get all vertices for a key index given the specified value.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -vertices -key <<key>> -value <<value>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -vertices -key type -value feed-instance
-
----+++ Vertex Edges
-
-Get the adjacent vertices or edges of the vertex with the specified direction.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -edges -id <<vertex-id>> -direction <<direction>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -edges -id 4 -direction both
-$FALCON_HOME/bin/falcon metadata -edges -id 4 -direction inE
-
----+++ Edge
-
-Get the edge with the specified id.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -edge -id <<id>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -edge -id Q9n-Q-5g
-
----++ Metadata Discovery Options
-
----+++ List
-
-Lists all dimensions of a given type. If the user provides the optional param cluster, only the dimensions related to that cluster are listed.
-Usage:
-$FALCON_HOME/bin/falcon metadata -list -type [cluster_entity|datasource_entity|feed_entity|process_entity|user|colo|tags|groups|pipelines|replication_metrics]
-
-Optional Args : -cluster <<cluster name>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -list -type process_entity -cluster primary-cluster
-$FALCON_HOME/bin/falcon metadata -list -type tags
-
-
-To display replication metrics from a recipe-based replication process or from feed replication:
-Usage:
-$FALCON_HOME/bin/falcon metadata -list -type replication_metrics -process/-feed <entity name>
-Optional Args : -numResults <<value>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -list -type replication_metrics -process hdfs-replication
-$FALCON_HOME/bin/falcon metadata -list -type replication_metrics -feed fs-replication
-
-
----+++ Relations
-
-List all dimensions related to the specified dimension, identified by dimension-type and dimension-name.
-Usage:
-$FALCON_HOME/bin/falcon metadata -relations -type [cluster_entity|feed_entity|process_entity|user|colo|tags|groups|pipelines] -name <<Dimension Name>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -relations -type process_entity -name sample-process
-
-
----++Admin Options
-
----+++Help
-
-Usage:
-$FALCON_HOME/bin/falcon admin -help
-
----+++Version
-
-Version returns the current version of Falcon installed.
-Usage:
-$FALCON_HOME/bin/falcon admin -version
-
----+++Status
-
-Status returns the current state of Falcon (running or stopped).
-Usage:
-$FALCON_HOME/bin/falcon admin -status
-
-
----++ Recipe Options
-
----+++ Submit Recipe
-
-Submit the specified recipe.
-
-Usage:
-$FALCON_HOME/bin/falcon recipe -name <name>
-Name of the recipe. The user should have defined <name>-template.xml and <name>.properties in the path specified by falcon.recipe.path in the client.properties file. The falcon.home path is used if falcon.recipe.path is not specified in the client.properties file.
-If it is not specified in the client.properties file and the files also cannot be found at falcon.home, the Falcon CLI will fail.
-
-Optional Args : -tool <recipeToolClassName>
-Falcon provides a base tool that recipes can override. If this option is not specified, the default RecipeTool
-is used. This option is required if the user defines their own recipe tool class.
-
-Example:
-$FALCON_HOME/bin/falcon recipe -name hdfs-replication
-


[13/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/InstanceUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/InstanceUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/InstanceUtil.java
deleted file mode 100644
index b07e275..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/InstanceUtil.java
+++ /dev/null
@@ -1,855 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonSyntaxException;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.falcon.request.BaseRequest;
-import org.apache.falcon.resource.APIResult;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.falcon.resource.InstanceDependencyResult;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-import org.apache.falcon.resource.SchedulableEntityInstance;
-import org.apache.falcon.resource.TriageResult;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.http.HttpResponse;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.BundleJob;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.CoordinatorJob;
-import org.apache.oozie.client.Job.Status;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.apache.oozie.client.WorkflowJob;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.json.JSONException;
-import org.testng.Assert;
-
-import java.io.IOException;
-import java.lang.reflect.Type;
-import java.net.URISyntaxException;
-import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * util functions related to instanceTest.
- */
-public final class InstanceUtil {
-
-    public static final int INSTANCES_CREATED_TIMEOUT = OSUtil.IS_WINDOWS ? 20 : 10;
-    private static final Logger LOGGER = Logger.getLogger(InstanceUtil.class);
-    private static final EnumSet<Status> LIVE_STATUS = EnumSet.of(Status.RUNNING,
-        Status.PREP, Status.SUCCEEDED, Status.SUSPENDED);
-
-    private InstanceUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    public static APIResult sendRequestProcessInstance(String url, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return hitUrl(url, Util.getMethodType(url), user);
-    }
-
-    public static APIResult hitUrl(String url,
-            String method, String user) throws URISyntaxException,
-            IOException, AuthenticationException, InterruptedException {
-        BaseRequest request = new BaseRequest(url, method, user);
-        HttpResponse response = request.run();
-        String responseString = IOUtils.toString(response.getEntity().getContent(), "UTF-8");
-        LOGGER.info("The web service response is:\n" + Util.prettyPrintXmlOrJson(responseString));
-        APIResult result;
-        if (url.contains("/summary/")) {
-            result = new InstancesSummaryResult(APIResult.Status.FAILED, responseString);
-        }else if (url.contains("/listing/")) {
-            result = new FeedInstanceResult(APIResult.Status.FAILED, responseString);
-        }else if (url.contains("instance/dependencies")) {
-            result = new InstanceDependencyResult(APIResult.Status.FAILED, responseString);
-        }else if (url.contains("instance/triage")) {
-            result = new TriageResult(APIResult.Status.FAILED, responseString);
-        }else {
-            result = new InstancesResult(APIResult.Status.FAILED, responseString);
-        }
-        Assert.assertNotNull(result, "APIResult is null");
-        for (ResponseErrors error : ResponseErrors.values()) {
-            if (responseString.contains(error.getError())) {
-                return result;
-            }
-        }
-        final String[] errorStrings = {
-            "(FEED) not found",
-            "is beforePROCESS  start",
-            "is after end date",
-            "is after PROCESS's end",
-            "is before PROCESS's  start",
-            "is before the entity was scheduled",
-        };
-        for (String error : errorStrings) {
-            if (responseString.contains(error)) {
-                return result;
-            }
-        }
-        try {
-            result = new GsonBuilder().registerTypeAdapter(Date.class, new JsonDeserializer<Date>() {
-                @Override
-                public Date deserialize(JsonElement json, Type t, JsonDeserializationContext c) {
-                    return new DateTime(json.getAsString()).toDate();
-                }
-            }).create().fromJson(responseString, getClassOfResult(url));
-        } catch (JsonSyntaxException e) {
-            Assert.fail("Not a valid json:\n" + responseString);
-        }
-        LOGGER.info("statusCode: " + response.getStatusLine().getStatusCode());
-        LOGGER.info("message: " + result.getMessage());
-        LOGGER.info("APIResult.Status: " + result.getStatus());
-        return result;
-    }
-
-    /**
-     * Returns API result class matching to API request url.
-     */
-    private static Class<? extends APIResult> getClassOfResult(String url) {
-        final Class<? extends APIResult> classOfResult;
-        if (url.contains("/listing/")) {
-            classOfResult = FeedInstanceResult.class;
-        } else if (url.contains("/summary/")) {
-            classOfResult = InstancesSummaryResult.class;
-        } else if (url.contains("instance/dependencies")) {
-            classOfResult = InstanceDependencyResult.class;
-        } else if (url.contains("instance/triage")) {
-            classOfResult = TriageResult.class;
-        } else {
-            classOfResult = InstancesResult.class;
-        }
-        return classOfResult;
-    }
-
-    /**
-     * Checks if API response reflects success and if it's instances match to expected status.
-     *
-     * @param instancesResult  - kind of response from API which should contain information about
-     *                           instances
-     * @param bundle           - bundle from which process instances are being analyzed
-     * @param wfStatus expected status of instances
-     */
-    public static void validateSuccess(InstancesResult instancesResult, Bundle bundle,
-            InstancesResult.WorkflowStatus wfStatus) {
-        Assert.assertEquals(instancesResult.getStatus(), APIResult.Status.SUCCEEDED);
-        Assert.assertEquals(instancesInResultWithStatus(instancesResult, wfStatus),
-            bundle.getProcessConcurrency());
-    }
-
-    /**
-     * Check the number of instances in response which have the same status as expected.
-     *
-     * @param instancesResult  kind of response from API which should contain information about
-     *                         instances
-     * @param workflowStatus   expected status of instances
-     * @return number of instances which have expected status
-     */
-    public static int instancesInResultWithStatus(InstancesResult instancesResult,
-            InstancesResult.WorkflowStatus workflowStatus) {
-        InstancesResult.Instance[] instances = instancesResult.getInstances();
-        LOGGER.info("instances: " + Arrays.toString(instances));
-        List<InstancesResult.WorkflowStatus> statuses =
-            new ArrayList<>();
-        for (InstancesResult.Instance instance : instances) {
-            LOGGER.info("instance: " + instance + " status = " + instance.getStatus());
-            statuses.add(instance.getStatus());
-        }
-        return Collections.frequency(statuses, workflowStatus);
-    }
-
-    /**
-     * Validates that response doesn't contain instances.
-     * @param r response
-     */
-    public static void validateSuccessWOInstances(InstancesResult r) {
-        AssertUtil.assertSucceeded(r);
-        Assert.assertNull(r.getInstances(), "Unexpected :" + Arrays.toString(r.getInstances()));
-    }
-
-    /**
-     * Validates that failed response contains specific error message.
-     * @param instancesResult response
-     * @param error expected error
-     */
-    public static void validateError(InstancesResult instancesResult, ResponseErrors error) {
-        Assert.assertTrue(instancesResult.getMessage().contains(error.getError()),
-            "Error should contains '" + error.getError() + "'");
-    }
-
-    /**
-     * Checks that actual number of instances with different statuses are equal to expected number
-     * of instances with matching statuses.
-     *
-     * @param instancesResult kind of response from API which should contain information about
-     *                        instances <p/>
-     *                        All parameters below reflect number of expected instances with some
-     *                        kind of status.
-     * @param totalCount      total number of instances.
-     * @param runningCount    number of running instances.
-     * @param suspendedCount  number of suspended instances.
-     * @param waitingCount    number of waiting instances.
-     * @param killedCount     number of killed instances.
-     */
-    public static void validateResponse(InstancesResult instancesResult, int totalCount,
-            int runningCount, int suspendedCount, int waitingCount, int killedCount) {
-        InstancesResult.Instance[] instances = instancesResult.getInstances();
-        LOGGER.info("instances: " + Arrays.toString(instances));
-        Assert.assertNotNull(instances, "instances should be not null");
-        Assert.assertEquals(instances.length, totalCount, "Total Instances");
-        List<InstancesResult.WorkflowStatus> statuses = new ArrayList<>();
-        for (InstancesResult.Instance instance : instances) {
-            final InstancesResult.WorkflowStatus status = instance.getStatus();
-            LOGGER.info("status: " + status + ", instance: " + instance.getInstance());
-            statuses.add(status);
-        }
-        Assert.assertEquals(Collections.frequency(statuses, InstancesResult.WorkflowStatus.RUNNING),
-            runningCount, "Running Instances");
-        Assert.assertEquals(Collections.frequency(statuses, InstancesResult.WorkflowStatus.SUSPENDED),
-            suspendedCount, "Suspended Instances");
-        Assert.assertEquals(Collections.frequency(statuses, InstancesResult.WorkflowStatus.WAITING),
-            waitingCount, "Waiting Instances");
-        Assert.assertEquals(Collections.frequency(statuses, InstancesResult.WorkflowStatus.KILLED),
-            killedCount, "Killed Instances");
-    }
-
-    /**
-     * Retrieves workflow IDs from every instances from response.
-     * @param instancesResult response
-     * @return list of workflow IDs
-     */
-    public static List<String> getWorkflowJobIds(InstancesResult instancesResult) {
-        InstancesResult.Instance[] instances = instancesResult.getInstances();
-        LOGGER.info("Instances: " + Arrays.toString(instances));
-        Assert.assertNotNull(instances, "Instances should be not null");
-        List<String> wfIds = new ArrayList<>();
-        for (InstancesResult.Instance instance : instances) {
-            LOGGER.warn(String.format(
-                "instance: %s, status: %s, logs : %s", instance, instance.getStatus(), instance.getLogFile()));
-            if (instance.getStatus().name().equals("RUNNING") || instance.getStatus().name().equals("SUCCEEDED")) {
-                wfIds.add(instance.getLogFile());
-            }
-            if (instance.getStatus().name().equals("KILLED") || instance.getStatus().name().equals("WAITING")) {
-                Assert.assertNull(instance.getLogFile());
-            }
-        }
-        return wfIds;
-    }
-
-    /**
-     * Checks that expected number of failed instances matches actual number of failed ones.
-     *
-     * @param instancesResult kind of response from API which should contain information about
-     *                        instances.
-     * @param failCount number of instances which should be failed.
-     */
-    public static void validateFailedInstances(InstancesResult instancesResult, int failCount) {
-        AssertUtil.assertSucceeded(instancesResult);
-        int counter = 0;
-        for (InstancesResult.Instance oneInstance : instancesResult.getInstances()) {
-            if (oneInstance.getStatus() == InstancesResult.WorkflowStatus.FAILED) {
-                counter++;
-            }
-        }
-        Assert.assertEquals(counter, failCount, "Actual number of failed instances does not "
-            + "match to expected number of failed instances.");
-    }
-
-    /**
-     * Gets process workflows by given statuses.
-     * @param oozieClient oozie client of cluster where process is running
-     * @param processName process name
-     * @param statuses statuses workflows will be selected by
-     * @return list of matching workflows
-     * @throws OozieClientException
-     */
-    public static List<String> getWorkflows(OozieClient oozieClient, String processName,
-            WorkflowJob.Status... statuses) throws OozieClientException {
-        String bundleID = OozieUtil.getBundles(oozieClient, processName, EntityType.PROCESS).get(0);
-        List<String> workflowJobIds = OozieUtil.getWorkflowJobs(oozieClient, bundleID);
-
-        List<String> toBeReturned = new ArrayList<>();
-        for (String jobId : workflowJobIds) {
-            WorkflowJob wfJob = oozieClient.getJobInfo(jobId);
-            LOGGER.info("wfJob.getId(): " + wfJob.getId() + " wfJob.getStartTime(): "
-                + wfJob.getStartTime() + "jobId: " + jobId + "  wfJob.getStatus(): " + wfJob.getStatus());
-            if (statuses.length == 0 || Arrays.asList(statuses).contains(wfJob.getStatus())) {
-                toBeReturned.add(jobId);
-            }
-        }
-        return toBeReturned;
-    }
-
-    public static boolean isWorkflowRunning(OozieClient oozieClient, String workflowID) throws
-            OozieClientException {
-        WorkflowJob.Status status = oozieClient.getJobInfo(workflowID).getStatus();
-        return status == WorkflowJob.Status.RUNNING;
-    }
-
-    public static void areWorkflowsRunning(OozieClient oozieClient, List<String> workflowIds,
-            int totalWorkflows, int runningWorkflows, int killedWorkflows,
-            int succeededWorkflows) throws OozieClientException {
-        if (totalWorkflows != -1) {
-            Assert.assertEquals(workflowIds.size(), totalWorkflows);
-        }
-        final List<WorkflowJob.Status> statuses = new ArrayList<>();
-        for (String wfId : workflowIds) {
-            final WorkflowJob.Status status = oozieClient.getJobInfo(wfId).getStatus();
-            LOGGER.info("wfId: " + wfId + " status: " + status);
-            statuses.add(status);
-        }
-        if (runningWorkflows != -1) {
-            Assert.assertEquals(Collections.frequency(statuses, WorkflowJob.Status.RUNNING),
-                runningWorkflows, "Number of running jobs doesn't match.");
-        }
-        if (killedWorkflows != -1) {
-            Assert.assertEquals(Collections.frequency(statuses, WorkflowJob.Status.KILLED),
-                killedWorkflows, "Number of killed jobs doesn't match.");
-        }
-        if (succeededWorkflows != -1) {
-            Assert.assertEquals(Collections.frequency(statuses, WorkflowJob.Status.SUCCEEDED),
-                succeededWorkflows, "Number of succeeded jobs doesn't match.");
-        }
-    }
-
-    public static List<CoordinatorAction> getProcessInstanceList(OozieClient oozieClient,
-            String processName, EntityType entityType) throws OozieClientException {
-        String coordId = OozieUtil.getLatestCoordinatorID(oozieClient, processName, entityType);
-        //String coordId = getDefaultCoordinatorFromProcessName(processName);
-        LOGGER.info("default coordID: " + coordId);
-        return oozieClient.getCoordJobInfo(coordId).getActions();
-    }
-
-    public static int getInstanceCountWithStatus(OozieClient oozieClient, String processName,
-            CoordinatorAction.Status status, EntityType entityType) throws OozieClientException {
-        List<CoordinatorAction> coordActions = getProcessInstanceList(oozieClient, processName, entityType);
-        List<CoordinatorAction.Status> statuses = new ArrayList<>();
-        for (CoordinatorAction action : coordActions) {
-            statuses.add(action.getStatus());
-        }
-        return Collections.frequency(statuses, status);
-    }
-
-    /**
-     * Retrieves status of one instance.
-     *
-     * @param oozieClient     - server from which instance status will be retrieved.
-     * @param processName    - name of process which mentioned instance belongs to.
-     * @param bundleNumber   - ordinal number of one of the bundle which are related to that
-     *                         process.
-     * @param instanceNumber - ordinal number of instance which state will be returned.
-     * @return - state of mentioned instance.
-     * @throws OozieClientException
-     */
-    public static CoordinatorAction.Status getInstanceStatus(OozieClient oozieClient, String processName,
-            int bundleNumber, int instanceNumber) throws OozieClientException {
-        String bundleID = OozieUtil.getSequenceBundleID(oozieClient, processName, EntityType.PROCESS, bundleNumber);
-        if (StringUtils.isEmpty(bundleID)) {
-            return null;
-        }
-        String coordID = OozieUtil.getDefaultCoordIDFromBundle(oozieClient, bundleID);
-        if (StringUtils.isEmpty(coordID)) {
-            return null;
-        }
-        CoordinatorJob coordInfo = oozieClient.getCoordJobInfo(coordID);
-        if (coordInfo == null) {
-            return null;
-        }
-        LOGGER.info("coordInfo = " + coordInfo);
-        List<CoordinatorAction> actions = coordInfo.getActions();
-        if (actions.size() == 0) {
-            return null;
-        }
-        LOGGER.info("actions = " + actions);
-        return actions.get(instanceNumber).getStatus();
-    }
-
-    /**
-     * Forms and sends process instance request based on the url of the action to be performed and its
-     * parameters.
-     *
-     * @param colo - servers on which action should be performed
-     * @param user - whose credentials will be used for this action
-     * @return result from API
-     */
-    public static APIResult createAndSendRequestProcessInstance(String url, String params, String colo, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        if (params != null && !colo.equals("")) {
-            url = url + params + "&" + colo.substring(1);
-        } else if (params != null) {
-            url = url + params;
-        } else {
-            url = url + colo;
-        }
-        return sendRequestProcessInstance(url, user);
-    }
-
-    public static org.apache.oozie.client.WorkflowJob.Status getInstanceStatusFromCoord(
-            OozieClient oozieClient, String coordID, int instanceNumber) throws OozieClientException {
-        CoordinatorJob coordInfo = oozieClient.getCoordJobInfo(coordID);
-        String jobId = coordInfo.getActions().get(instanceNumber).getExternalId();
-        LOGGER.info("jobId = " + jobId);
-        if (jobId == null) {
-            return null;
-        }
-        WorkflowJob actionInfo = oozieClient.getJobInfo(jobId);
-        return actionInfo.getStatus();
-    }
-
-    public static List<String> getInputFoldersForInstanceForReplication(
-            OozieClient oozieClient, String coordID, int instanceNumber) throws OozieClientException {
-        CoordinatorAction x = oozieClient.getCoordActionInfo(coordID + "@" + instanceNumber);
-        String jobId = x.getExternalId();
-        WorkflowJob wfJob = oozieClient.getJobInfo(jobId);
-        return getReplicationFolderFromInstanceRunConf(wfJob.getConf());
-    }
-
-    private static List<String> getReplicationFolderFromInstanceRunConf(String runConf) {
-        String conf;
-        conf = runConf.substring(runConf.indexOf("falconInPaths</name>") + 20);
-        conf = conf.substring(conf.indexOf("<value>") + 7);
-        conf = conf.substring(0, conf.indexOf("</value>"));
-        return new ArrayList<>(Arrays.asList(conf.split(",")));
-    }
-
-    public static int getInstanceRunIdFromCoord(OozieClient oozieClient, String coordID, int instanceNumber)
-        throws OozieClientException {
-        CoordinatorJob coordInfo = oozieClient.getCoordJobInfo(coordID);
-        WorkflowJob actionInfo = oozieClient.getJobInfo(coordInfo.getActions().get(instanceNumber).getExternalId());
-        return actionInfo.getRun();
-    }
-
-    public static int checkIfFeedCoordExist(AbstractEntityHelper helper,
-            String feedName, String coordType) throws OozieClientException {
-        LOGGER.info("feedName: " + feedName);
-        int numberOfCoord = 0;
-
-        final OozieClient oozieClient = helper.getOozieClient();
-        if (OozieUtil.getBundles(oozieClient, feedName, EntityType.FEED).size() == 0) {
-            return 0;
-        }
-        List<String> bundleIds = OozieUtil.getBundles(oozieClient, feedName, EntityType.FEED);
-        LOGGER.info("bundleIds: " + bundleIds);
-
-        for (String bundleId : bundleIds) {
-            LOGGER.info("bundleId: " + bundleId);
-            OozieUtil.waitForCoordinatorJobCreation(oozieClient, bundleId);
-            List<CoordinatorJob> coords =
-                    OozieUtil.getBundleCoordinators(oozieClient, bundleId);
-            LOGGER.info("coords: " + coords);
-            for (CoordinatorJob coord : coords) {
-                if (coord.getAppName().contains(coordType)) {
-                    numberOfCoord++;
-                }
-            }
-        }
-        return numberOfCoord;
-    }
-
-    public static List<CoordinatorAction> getProcessInstanceListFromAllBundles(
-            OozieClient oozieClient, String processName, EntityType entityType)
-        throws OozieClientException {
-        List<CoordinatorAction> list = new ArrayList<>();
-        final List<String> bundleIds = OozieUtil.getBundles(oozieClient, processName, entityType);
-        LOGGER.info("bundle size for process is " + bundleIds.size());
-        for (String bundleId : bundleIds) {
-            BundleJob bundleInfo = oozieClient.getBundleJobInfo(bundleId);
-            List<CoordinatorJob> coordJobs = bundleInfo.getCoordinators();
-            LOGGER.info("number of coordJobs in bundle " + bundleId + "=" + coordJobs.size());
-            for (CoordinatorJob coordJob : coordJobs) {
-                List<CoordinatorAction> actions =
-                        oozieClient.getCoordJobInfo(coordJob.getId()).getActions();
-                LOGGER.info("number of actions in coordinator " + coordJob.getId() + " is "
-                        + actions.size());
-                list.addAll(actions);
-            }
-        }
-        String coordId = OozieUtil.getLatestCoordinatorID(oozieClient, processName, entityType);
-        LOGGER.info("default coordID: " + coordId);
-        return list;
-    }
-
-    public static String getOutputFolderForInstanceForReplication(OozieClient oozieClient,
-            String coordID, int instanceNumber) throws OozieClientException {
-        CoordinatorJob coordInfo = oozieClient.getCoordJobInfo(coordID);
-        final CoordinatorAction coordAction = coordInfo.getActions().get(instanceNumber);
-        final String actionConf = oozieClient.getJobInfo(coordAction.getExternalId()).getConf();
-        return getReplicatedFolderFromInstanceRunConf(actionConf);
-    }
-
-    private static String getReplicatedFolderFromInstanceRunConf(String runConf) {
-        String inputPathExample = getReplicationFolderFromInstanceRunConf(runConf).get(0);
-        String postFix = inputPathExample.substring(inputPathExample.length() - 7, inputPathExample.length());
-        return getReplicatedFolderBaseFromInstanceRunConf(runConf) + postFix;
-    }
-
-    public static String getOutputFolderBaseForInstanceForReplication(
-            OozieClient oozieClient, String coordID, int instanceNumber) throws OozieClientException {
-        CoordinatorJob coordInfo = oozieClient.getCoordJobInfo(coordID);
-        final CoordinatorAction coordAction = coordInfo.getActions().get(instanceNumber);
-        final String actionConf = oozieClient.getJobInfo(coordAction.getExternalId()).getConf();
-        return getReplicatedFolderBaseFromInstanceRunConf(actionConf);
-    }
-
-    private static String getReplicatedFolderBaseFromInstanceRunConf(String runConf) {
-        String conf = runConf.substring(runConf.indexOf("distcpTargetPaths</name>") + 24);
-        conf = conf.substring(conf.indexOf("<value>") + 7);
-        conf = conf.substring(0, conf.indexOf("</value>"));
-        return conf;
-    }
-
-    /**
-     * Waits till supplied number of instances of process/feed reach expected state during
-     * specific time.
-     *
-     * @param client             oozie client to retrieve info about instances
-     * @param entityName         name of feed or process
-     * @param instancesNumber    number of instances expected to reach the required status
-     * @param expectedStatus     expected status we are waiting for
-     * @param entityType         type of entity - feed or process expected
-     * @param totalMinutesToWait time in minutes for which instance state should be polled
-     * @throws OozieClientException
-     */
-    public static void waitTillInstanceReachState(OozieClient client, String entityName, int instancesNumber,
-            CoordinatorAction.Status expectedStatus, EntityType entityType, int totalMinutesToWait)
-        throws OozieClientException {
-        String filter;
-        // get the bundle ids
-        if (entityType.equals(EntityType.FEED)) {
-            filter = "name=FALCON_FEED_" + entityName;
-        } else {
-            filter = "name=FALCON_PROCESS_" + entityName;
-        }
-        List<BundleJob> bundleJobs = new ArrayList<>();
-        for (int retries = 0; retries < 20; ++retries) {
-            bundleJobs = OozieUtil.getBundles(client, filter, 0, 10);
-            if (bundleJobs.size() > 0) {
-                break;
-            }
-            TimeUtil.sleepSeconds(5);
-        }
-        if (bundleJobs.size() == 0) {
-            Assert.fail("Could not retrieve bundles");
-        }
-        List<String> bundleIds = OozieUtil.getBundleIds(bundleJobs);
-        Collections.sort(bundleIds, Collections.reverseOrder());
-        String coordId = null;
-        for (String bundleId : bundleIds) {
-            LOGGER.info(String.format("Using bundle %s", bundleId));
-            final Status status = client.getBundleJobInfo(bundleId).getStatus();
-            Assert.assertTrue(LIVE_STATUS.contains(status),
-                String.format("Bundle job %s is should be prep/running but is %s", bundleId, status));
-            OozieUtil.waitForCoordinatorJobCreation(client, bundleId);
-            List<CoordinatorJob> coords = client.getBundleJobInfo(bundleId).getCoordinators();
-            List<String> cIds = new ArrayList<>();
-            if (entityType == EntityType.PROCESS) {
-                for (CoordinatorJob coord : coords) {
-                    cIds.add(coord.getId());
-                }
-                coordId = OozieUtil.getMinId(cIds);
-                break;
-            } else {
-                for (CoordinatorJob coord : coords) {
-                    if (coord.getAppName().contains("FEED_REPLICATION")) {
-                        cIds.add(coord.getId());
-                    }
-                }
-                if (!cIds.isEmpty()) {
-                    coordId = cIds.get(0);
-                    break;
-                }
-            }
-        }
-        Assert.assertNotNull(coordId, "Coordinator id not found");
-        LOGGER.info(String.format("Using coordinator id: %s", coordId));
-        int maxTries = 50;
-        int totalSleepTime = totalMinutesToWait * 60;
-        int sleepTime = totalSleepTime / maxTries;
-        LOGGER.info(String.format("Sleep for %d seconds", sleepTime));
-        for (int i = 0; i < maxTries; i++) {
-            LOGGER.info(String.format("Try %d of %d", (i + 1), maxTries));
-            CoordinatorJob coordinatorJob = client.getCoordJobInfo(coordId);
-            final Status coordinatorStatus = coordinatorJob.getStatus();
-            if (expectedStatus != CoordinatorAction.Status.TIMEDOUT){
-                Assert.assertTrue(LIVE_STATUS.contains(coordinatorStatus),
-                        String.format("Coordinator %s should be running/prep but is %s.", coordId, coordinatorStatus));
-            }
-            List<CoordinatorAction> coordinatorActions = coordinatorJob.getActions();
-            int instanceWithStatus = 0;
-            for (CoordinatorAction coordinatorAction : coordinatorActions) {
-                LOGGER.info(String.format("Coordinator Action %s status is %s on oozie %s",
-                    coordinatorAction.getId(), coordinatorAction.getStatus(), client.getOozieUrl()));
-                if (expectedStatus == coordinatorAction.getStatus()) {
-                    instanceWithStatus++;
-                }
-            }
-            if (instanceWithStatus >= instancesNumber) {
-                return;
-            } else {
-                TimeUtil.sleepSeconds(sleepTime);
-            }
-        }
-        Assert.fail("expected state of instance was never reached");
-    }
-
-    /**
-     * Waits till supplied number of instances of process/feed reach expected state during
-     * specific time.
-     *
-     * @param client           oozie client to retrieve info about instances
-     * @param entityName       name of feed or process
-     * @param numberOfInstance number of instances whose status we are waiting for
-     * @param expectedStatus   expected status we are waiting for
-     * @param entityType       type of entity - feed or process expected
-     */
-    public static void waitTillInstanceReachState(OozieClient client, String entityName,
-            int numberOfInstance,
-            CoordinatorAction.Status expectedStatus,
-            EntityType entityType)
-        throws OozieClientException {
-        int totalMinutesToWait = getMinutesToWait(entityType, expectedStatus);
-        waitTillInstanceReachState(client, entityName, numberOfInstance, expectedStatus,
-                entityType, totalMinutesToWait);
-    }
-
-    /**
-     * Generates time which is presumably needed for process/feed instances to reach particular
-     * state.
-     * Feed instances run faster than process instances, so feed timeouts are shorter than process timeouts.
-     *
-     * @param entityType     type of entity whose instances' status we are waiting for
-     * @param expectedStatus expected status we are waiting for
-     * @return minutes to wait for expected status
-     */
-    private static int getMinutesToWait(EntityType entityType, CoordinatorAction.Status expectedStatus) {
-        switch (expectedStatus) {
-        case RUNNING:
-            if (entityType == EntityType.PROCESS) {
-                return OSUtil.IS_WINDOWS ? 20 : 10;
-            } else if (entityType == EntityType.FEED) {
-                return OSUtil.IS_WINDOWS ? 10 : 5;
-            }
-        case WAITING:
-            return OSUtil.IS_WINDOWS ? 6 : 3;
-        case SUCCEEDED:
-            if (entityType == EntityType.PROCESS) {
-                return OSUtil.IS_WINDOWS ? 25 : 15;
-            } else if (entityType == EntityType.FEED) {
-                return OSUtil.IS_WINDOWS ? 20 : 10;
-            }
-        case KILLED:
-        case TIMEDOUT:
-            return OSUtil.IS_WINDOWS ? 40 : 20;
-        default:
-            return OSUtil.IS_WINDOWS ? 30 : 15;
-        }
-    }
-
-    /**
-     * Waits till instances of specific job will be created during specific time.
-     * Use this method directly in unusual test cases where timeouts differ from the usual ones.
-     * In other cases use waitTillInstancesAreCreated(OozieClient,String,int)
-     *
-     * @param oozieClient oozie client of the cluster on which job is running
-     * @param entity      definition of entity which describes job
-     * @param bundleSeqNo bundle number if update has happened.
-     * @throws OozieClientException
-     */
-    public static void waitTillInstancesAreCreated(OozieClient oozieClient, String entity, int bundleSeqNo,
-            int totalMinutesToWait) throws OozieClientException {
-        String entityName = Util.readEntityName(entity);
-        EntityType type = Util.getEntityType(entity);
-        String bundleID = OozieUtil.getSequenceBundleID(oozieClient, entityName,
-            type, bundleSeqNo);
-        String coordID = OozieUtil.getDefaultCoordIDFromBundle(oozieClient, bundleID);
-        for (int sleepCount = 0; sleepCount < totalMinutesToWait; sleepCount++) {
-            CoordinatorJob coordInfo = oozieClient.getCoordJobInfo(coordID);
-
-            if (coordInfo.getActions().size() > 0) {
-                break;
-            }
-            LOGGER.info("Coord " + coordInfo.getId() + " still doesn't have "
-                + "instance created on oozie: " + oozieClient.getOozieUrl());
-            TimeUtil.sleepSeconds(5);
-        }
-    }
-
-    /**
-     * Waits till instances of specific job will be created during timeout.
-     * Timeout is common for most of usual test cases.
-     *
-     * @param oozieClient  oozieClient of cluster job is running on
-     * @param entity      definition of entity which describes job
-     * @param bundleSeqNo bundle number if update has happened.
-     * @throws OozieClientException
-     */
-    public static void waitTillInstancesAreCreated(OozieClient oozieClient, String entity, int bundleSeqNo
-    ) throws OozieClientException {
-        int sleep = INSTANCES_CREATED_TIMEOUT * 60 / 5;
-        waitTillInstancesAreCreated(oozieClient, entity, bundleSeqNo, sleep);
-    }
-
-    /**
-     * Asserts instances of specific job will be present for given instanceTime.
-     *
-     * @param instancesResult  InstanceDependencyResult
-     * @param oozieClient  oozieClient of cluster job is running on
-     * @param bundleID     bundleId of job
-     * @param time  instanceTime.
-     * @throws JSONException
-     * @throws ParseException
-     */
-    public static void assertProcessInstances(InstanceDependencyResult instancesResult, OozieClient oozieClient,
-                                        String bundleID, String time)
-        throws OozieClientException, ParseException, JSONException {
-        List<String> inputPath = new ArrayList<>();
-        List<String> outputPath = new ArrayList<>();
-        SchedulableEntityInstance[] instances = instancesResult.getDependencies();
-        LOGGER.info("instances: " + Arrays.toString(instances));
-        Assert.assertNotNull(instances, "instances should be not null");
-        for (SchedulableEntityInstance instance : instances) {
-            Assert.assertNotNull(instance.getCluster());
-            Assert.assertNotNull(instance.getEntityName());
-            Assert.assertNotNull(instance.getEntityType());
-            Assert.assertNotNull(instance.getInstanceTime());
-            Assert.assertNotNull(instance.getTags());
-            if (instance.getTags().equals("Input")) {
-                inputPath.add(new DateTime(instance.getInstanceTime(), DateTimeZone.UTC).toString());
-            }
-            if (instance.getTags().equals("Output")) {
-                outputPath.add(new DateTime(instance.getInstanceTime(), DateTimeZone.UTC).toString());
-            }
-        }
-
-        List<String> inputActual = getMinuteDatesToPath(inputPath.get(inputPath.indexOf(
-            Collections.min(inputPath))), inputPath.get(inputPath.indexOf(Collections.max(inputPath))), 5);
-        List<String> outputActual = getMinuteDatesToPath(outputPath.get(outputPath.indexOf(Collections.min(
-            outputPath))), outputPath.get(outputPath.indexOf(Collections.max(outputPath))), 5);
-
-        Configuration conf = OozieUtil.getProcessConf(oozieClient, bundleID, time);
-        Assert.assertNotNull(conf, "Configuration should not be null");
-        List<String> inputExp = Arrays.asList(conf.get("inputData").split(","));
-        List<String> outputExp = Arrays.asList(conf.get("outputData").split(","));
-
-        Assert.assertTrue(matchList(inputExp, inputActual), " Inputs dont match");
-        Assert.assertTrue(matchList(outputExp, outputActual), " Outputs dont match");
-
-    }
-
-    /**
-     * Returns a list of paths based on the given start and end time.
-     *
-     * @param startOozieDate  start date
-     * @param endOozieDate    end date
-     * @param minuteSkip      difference  between paths
-     * @throws ParseException
-     */
-    public static List<String> getMinuteDatesToPath(String startOozieDate, String endOozieDate,
-                                                    int minuteSkip) throws ParseException {
-        String myFormat = "yyyy'-'MM'-'dd'T'HH':'mm'Z'";
-        String userFormat = "yyyy'-'MM'-'dd'T'HH':'mm':'ss'.'SSS'Z'";
-        return TimeUtil.getMinuteDatesOnEitherSide(TimeUtil.parseDate(startOozieDate, myFormat, userFormat),
-                TimeUtil.parseDate(endOozieDate, myFormat, userFormat), minuteSkip);
-    }
-
-    /**
-     * Parses date from one format to another.
-     *
-     * @param oozieDate  input date
-     * @throws ParseException
-     */
-    public static String getParsedDates(String oozieDate) throws ParseException {
-        String myFormat = "yyyy'-'MM'-'dd'T'HH':'mm'Z'";
-        String userFormat = "yyyy'-'MM'-'dd'T'HH':'mm':'ss'.'SSS'Z'";
-        return TimeUtil.parseDate(oozieDate, myFormat, userFormat);
-    }
-
-    /**
-     * Checks whether two lists match (same size and element-wise containment) after sorting.
-     *
-     * @param firstList  list<String> to be compared
-     * @param secondList  list<String> to be compared
-     */
-    public static boolean matchList(List<String> firstList, List<String> secondList) {
-        Collections.sort(firstList);
-        Collections.sort(secondList);
-        if (firstList.size() != secondList.size()) {
-            return false;
-        }
-        for (int index = 0; index < firstList.size(); index++) {
-            if (!firstList.get(index).contains(secondList.get(index))) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    /**
-     * Asserts instanceDependencyResult of specific job for a given feed.
-     *
-     * @param instancesResult  InstanceDependencyResult
-     * @param processName  process name for given bundle
-     * @param tag     Input/Output
-     * @param expectedInstances  instance for given instanceTime.
-     * @throws ParseException
-     */
-    public static void assertFeedInstances(InstanceDependencyResult instancesResult, String processName, String tag,
-                                            List<String> expectedInstances) throws ParseException {
-        List<String> actualInstances = new ArrayList<>();
-        SchedulableEntityInstance[] instances = instancesResult.getDependencies();
-        LOGGER.info("instances: " + Arrays.toString(instances));
-        Assert.assertNotNull(instances, "instances should be not null");
-        for (SchedulableEntityInstance instance : instances) {
-            Assert.assertNotNull(instance.getCluster());
-            Assert.assertNotNull(instance.getEntityName());
-            Assert.assertNotNull(instance.getEntityType());
-            Assert.assertNotNull(instance.getInstanceTime());
-            Assert.assertNotNull(instance.getTags());
-            Assert.assertTrue(instance.getEntityType().toString().equals("PROCESS"), "Type should be PROCESS");
-            Assert.assertTrue(instance.getEntityName().equals(processName), "Expected name is : " + processName);
-            Assert.assertTrue(instance.getTags().equals(tag));
-            actualInstances.add(getParsedDates(new DateTime(instance.getInstanceTime(), DateTimeZone.UTC).toString()));
-        }
-
-        Set<String> expectedInstancesSet = new HashSet<>(expectedInstances);
-        Set<String> actualInstancesSet = new HashSet<>(actualInstances);
-        Assert.assertEquals(expectedInstancesSet, actualInstancesSet, "Instances don't match");
-    }
-}
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/KerberosHelper.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/KerberosHelper.java
deleted file mode 100644
index 9d028fa..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/KerberosHelper.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.io.IOException;
-
-/**
- * Util methods for Kerberos.
- */
-public final class KerberosHelper {
-    private KerberosHelper() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    public static UserGroupInformation getUGI(String user) throws IOException {
-        // if unsecure cluster create a remote user object
-        if (!MerlinConstants.IS_SECURE) {
-            return UserGroupInformation.createRemoteUser(user);
-        }
-        // if secure create a ugi object from keytab
-        return UserGroupInformation.loginUserFromKeytabAndReturnUGI(getPrincipal(user),
-            getKeyTab(user));
-    }
-
-    private static String getKeyTab(String user) {
-        return MerlinConstants.getKeytabForUser(user);
-    }
-
-    private static String getPrincipal(String user) {
-        return MerlinConstants.USER_REALM.isEmpty() ? user : user + '@' + MerlinConstants
-                .USER_REALM;
-    }
-}
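
For context, a minimal sketch of how the KerberosHelper above might be used to run an HDFS call as a different test user; the class name, the "qa_user" principal and the path are illustrative placeholders, and only KerberosHelper.getUGI plus the standard Hadoop UserGroupInformation.doAs call are taken as given.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public final class KerberosHelperUsageSketch {
    public static void main(String[] args) throws Exception {
        // Unsecure cluster: plain remote-user UGI; secure cluster: keytab login (see getUGI above).
        UserGroupInformation ugi = KerberosHelper.getUGI("qa_user"); // "qa_user" is a placeholder
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(new Configuration());
                return fs.exists(new Path("/tmp/falcon-regression")); // placeholder path
            }
        });
        System.out.println("Path visible to qa_user: " + exists);
    }
}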

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/LogUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/LogUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/LogUtil.java
deleted file mode 100644
index e587704..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/LogUtil.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.BundleJob;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.CoordinatorJob;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.apache.oozie.client.WorkflowAction;
-import org.apache.oozie.client.WorkflowJob;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Collection;
-import java.util.List;
-
-/**
- * Util class for managing logs.
- */
-public final class LogUtil {
-    private static final Logger LOGGER = Logger.getLogger(LogUtil.class);
-    private static final String NL = System.getProperty("line.separator");
-    private static final String HR = StringUtils.repeat("-", 80);
-    private static final String HR_2 = StringUtils.repeat("-", 120);
-    private static final String HR_3 = StringUtils.repeat("-", 160);
-
-    private LogUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    private enum OozieDump {
-        BundleDump {
-            @Override
-            void writeLogs(OozieClient oozieClient, String location, Collection<File> filter) {
-                final List<BundleJob> bundleJobsInfo;
-                try {
-                    bundleJobsInfo = oozieClient.getBundleJobsInfo("", 0, 1000000);
-                } catch (OozieClientException e) {
-                    LOGGER.error("Couldn't fetch list of bundles. Exception: " + e);
-                    return;
-                }
-                for (BundleJob oneJobInfo : bundleJobsInfo) {
-                    final String bundleJobId = oneJobInfo.getId();
-                    if (!skipInfo()) {
-                        writeOneJobInfo(oozieClient, bundleJobId, location, filter);
-                    }
-                    if (!skipLog()) {
-                        writeOneJobLog(oozieClient, bundleJobId, location, filter);
-                    }
-                }
-            }
-
-            /**
-             * Pull and dump info of one job.
-             * @param oozieClient oozie client that will be used for pulling log
-             * @param bundleJobId job id of the bundle job
-             * @param location local location where logs will be dumped
-             * @param filter list of files that have already been dumped
-             */
-            private void writeOneJobInfo(OozieClient oozieClient, String bundleJobId,
-                                         String location, Collection<File> filter) {
-                final String fileName = OSUtil.concat(location, bundleJobId + "-info.log");
-                final File file = new File(fileName);
-                if (filter != null && filter.contains(file)) {
-                    return;
-                }
-                final BundleJob info;
-                try {
-                    info = oozieClient.getBundleJobInfo(bundleJobId);
-                } catch (OozieClientException e) {
-                    LOGGER.error("Couldn't fetch bundle info for " + bundleJobId + ". "
-                        + "Exception: " + e);
-                    return;
-                }
-                StringBuilder sb = new StringBuilder();
-                sb.append("Bundle ID : ").append(info.getId()).append(NL);
-                sb.append(HR).append(NL);
-                sb.append("Bundle Name : ").append(info.getAppName()).append(NL);
-                sb.append("App Path : ").append(info.getAppPath()).append(NL);
-                sb.append("Status : ").append(info.getStatus()).append(NL);
-                sb.append("User : ").append(info.getUser()).append(NL);
-                sb.append("Created : ").append(info.getCreatedTime()).append(NL);
-                sb.append("Started : ").append(info.getStartTime()).append(NL);
-                sb.append("EndTime : ").append(info.getEndTime()).append(NL);
-                sb.append("Kickoff time : ").append(info.getKickoffTime()).append(NL);
-                sb.append(HR_2).append(NL);
-                final String format = "%-40s %-10s %-5s %-10s %-30s %-20s";
-                sb.append(String.format(format,
-                    "Job ID", "Status", "Freq", "Unit", "Started", "Next Materialized")).append(NL);
-                sb.append(HR_2).append(NL);
-                for (CoordinatorJob cj : info.getCoordinators()) {
-                    sb.append(String.format(format,
-                        cj.getId(), cj.getStatus(),  cj.getFrequency(), cj.getTimeUnit(), cj.getStartTime(),
-                        cj.getNextMaterializedTime())).append(NL);
-                }
-                sb.append(HR_2).append(NL);
-                try {
-                    FileUtils.writeStringToFile(file, sb.toString());
-                } catch (IOException e) {
-                    LOGGER.error("Couldn't write bundle info for " + bundleJobId + ". "
-                        + "Exception: " + e);
-                }
-            }
-        },
-
-        CoordDump {
-            @Override
-            void writeLogs(OozieClient oozieClient, String location, Collection<File> filter) {
-                final List<CoordinatorJob> coordJobsInfo;
-                try {
-                    coordJobsInfo = oozieClient.getCoordJobsInfo("", 0, 1000000);
-                } catch (OozieClientException e) {
-                    LOGGER.error("Couldn't fetch list of bundles. Exception: " + e);
-                    return;
-                }
-                for (CoordinatorJob oneJobInfo : coordJobsInfo) {
-                    final String coordJobId = oneJobInfo.getId();
-                    if (!skipInfo()) {
-                        writeOneJobInfo(oozieClient, coordJobId, location, filter);
-                    }
-                    if (!skipLog()) {
-                        writeOneJobLog(oozieClient, coordJobId, location, filter);
-                    }
-                }
-            }
-
-            /**
-             * Pull and dump info of one job.
-             * @param oozieClient oozie client that will be used for pulling log
-             * @param coordJobId job id of the coordinator job
-             * @param location local location where logs will be dumped
-             * @param filter list of files that have already been dumped
-             */
-            private void writeOneJobInfo(OozieClient oozieClient, String coordJobId,
-                                         String location, Collection<File> filter) {
-                final String fileName = OSUtil.concat(location, coordJobId + "-info.log");
-                final File file = new File(fileName);
-                if (filter != null && filter.contains(file)) {
-                    return;
-                }
-                final CoordinatorJob info;
-                try {
-                    info = oozieClient.getCoordJobInfo(coordJobId);
-                } catch (OozieClientException e) {
-                    LOGGER.error("Couldn't fetch bundle info for " + coordJobId + ". "
-                        + "Exception: " + e);
-                    return;
-                }
-                StringBuilder sb = new StringBuilder();
-                sb.append("Coordinator Job ID : ").append(info.getId()).append(NL);
-                sb.append(HR).append(NL);
-                sb.append("Job Name : ").append(info.getAppName()).append(NL);
-                sb.append("App Path : ").append(info.getAppPath()).append(NL);
-                sb.append("Status : ").append(info.getStatus()).append(NL);
-                sb.append("User : ").append(info.getUser()).append(NL);
-                sb.append("Started : ").append(info.getStartTime()).append(NL);
-                sb.append("EndTime : ").append(info.getEndTime()).append(NL);
-                sb.append(HR_3).append(NL);
-                final String format = "%-40s %-10s %-40s %-10s %-30s %-30s";
-                sb.append(String.format(format,
-                    "Job ID", "Status", "Ext ID", "Err Code", "Created",
-                    "Nominal Time")).append(NL);
-                sb.append(HR_3).append(NL);
-                for (CoordinatorAction cj : info.getActions()) {
-                    sb.append(String.format(format,
-                        cj.getId(), cj.getStatus(),  cj.getExternalId(), cj.getErrorCode(),
-                        cj.getCreatedTime(), cj.getNominalTime())).append(NL);
-                }
-                sb.append(HR_3).append(NL);
-                try {
-                    FileUtils.writeStringToFile(file, sb.toString());
-                } catch (IOException e) {
-                    LOGGER.error("Couldn't write coord job info for " + coordJobId + ". "
-                        + "Exception: " + e);
-                }
-            }
-        },
-
-        WfDump {
-            @Override
-            void writeLogs(OozieClient oozieClient, String location, Collection<File> filter) {
-                final List<WorkflowJob> wfJobsInfo;
-                try {
-                    wfJobsInfo = oozieClient.getJobsInfo("", 0, 1000000);
-                } catch (OozieClientException e) {
-                    LOGGER.error("Couldn't fetch list of bundles. Exception: " + e);
-                    return;
-                }
-                for (WorkflowJob oneJobInfo : wfJobsInfo) {
-                    final String wfJobId = oneJobInfo.getId();
-                    if (!skipInfo()) {
-                        writeOneJobInfo(oozieClient, wfJobId, location, filter);
-                    }
-                    if (!skipLog()) {
-                        writeOneJobLog(oozieClient, wfJobId, location, filter);
-                    }
-                }
-            }
-
-            /**
-             * Pull and dump info of one job.
-             * @param oozieClient oozie client that will be used for pulling log
-             * @param wfJobId job id of the workflow job
-             * @param location local location where logs will be dumped
-             * @param filter list of files that have already been dumped
-             */
-            private void writeOneJobInfo(OozieClient oozieClient, String wfJobId,
-                                         String location, Collection<File> filter) {
-                final String fileName = OSUtil.concat(location, wfJobId + "-info.log");
-                final File file = new File(fileName);
-                if (filter != null && filter.contains(file)) {
-                    return;
-                }
-                final WorkflowJob info;
-                try {
-                    info = oozieClient.getJobInfo(wfJobId);
-                } catch (OozieClientException e) {
-                    LOGGER.error("Couldn't fetch bundle info for " + wfJobId + ". Exception: " + e);
-                    return;
-                }
-                StringBuilder sb = new StringBuilder();
-                sb.append("Workflow Job ID : ").append(info.getId()).append(NL);
-                sb.append(HR).append(NL);
-                sb.append("Wf Name : ").append(info.getAppName()).append(NL);
-                sb.append("App Path : ").append(info.getAppPath()).append(NL);
-                sb.append("Status : ").append(info.getStatus()).append(NL);
-                sb.append("Run : ").append(info.getRun()).append(NL);
-                sb.append("User : ").append(info.getUser()).append(NL);
-                sb.append("Group : ").append(info.getAcl()).append(NL);
-                sb.append("Created : ").append(info.getCreatedTime()).append(NL);
-                sb.append("Started : ").append(info.getStartTime()).append(NL);
-                sb.append("Last Modified : ").append(info.getLastModifiedTime()).append(NL);
-                sb.append("EndTime : ").append(info.getEndTime()).append(NL);
-                sb.append("External ID : ").append(info.getExternalId()).append(NL);
-                sb.append(NL).append("Actions").append(NL);
-                sb.append(HR_3).append(NL);
-                final String format = "%-80s %-10s %-40s %-15s %-10s";
-                sb.append(String.format(format,
-                    "Job ID", "Status", "Ext ID", "Ext Status", "Err Code")).append(NL);
-                sb.append(HR_3).append(NL);
-                for (WorkflowAction cj : info.getActions()) {
-                    sb.append(String.format(format,
-                        cj.getId(), cj.getStatus(),  cj.getExternalId(), cj.getExternalStatus(),
-                        cj.getErrorCode())).append(NL);
-                }
-                sb.append(HR_3).append(NL);
-                try {
-                    FileUtils.writeStringToFile(file, sb.toString());
-                } catch (IOException e) {
-                    LOGGER.error("Couldn't write wf job info for " + wfJobId + ". Exception: " + e);
-                }
-            }
-        };
-
-        private static boolean skipInfo() {
-            return Config.getBoolean("log.capture.oozie.skip_info", false);
-        }
-
-        private static boolean skipLog() {
-            return Config.getBoolean("log.capture.oozie.skip_log", false);
-        }
-
-        /**
-         * Pull and dump info and log of all jobs of a type.
-         * @param oozieClient oozie client that will be used for pulling log
-         * @param location local location where logs will be dumped
-         * @param filter list of files that have already been dumped
-         */
-        abstract void writeLogs(OozieClient oozieClient, String location, Collection<File> filter);
-
-        /**
-         * Pull and dump log of one job.
-         * @param oozieClient oozie client that will be used for pulling log
-         * @param jobId job id of the job
-         * @param location local location where logs will be dumped
-         * @param filter list of files that have already been dumped
-         */
-        private static void writeOneJobLog(OozieClient oozieClient, String jobId,
-            String location, Collection<File> filter) {
-            final String fileName = OSUtil.concat(location, jobId + ".log");
-            assert fileName != null;
-            final File file = new File(fileName);
-            if (filter != null && filter.contains(file)) {
-                return;
-            }
-            try {
-                oozieClient.getJobLog(jobId, "", "", new PrintStream(file));
-            } catch (OozieClientException e) {
-                LOGGER.error("Couldn't fetch log for " + jobId + ". Exception: " + e);
-            } catch (FileNotFoundException e) {
-                LOGGER.error("Couldn't write log for " + jobId + ". Exception: " + e);
-            }
-        }
-    }
-
-    /**
-     * Pulls and dumps oozie logs at a configured location.
-     * @param coloHelper coloHelper of the cluster from which oozie logs are going to be pulled
-     * @param logLocation local location at which logs are going to be dumped
-     */
-    public static void writeOozieLogs(ColoHelper coloHelper, String logLocation) {
-        final OozieClient oozieClient = coloHelper.getFeedHelper().getOozieClient();
-        final String hostname = coloHelper.getClusterHelper().getQaHost();
-        final String oozieLogLocation = OSUtil.concat(logLocation, "oozie_logs", hostname);
-        assert oozieLogLocation != null;
-        final File directory = new File(oozieLogLocation);
-        if (!directory.exists()) {
-            try {
-                FileUtils.forceMkdir(directory);
-            } catch (IOException e) {
-                LOGGER.error("Directory creation failed for: " + directory + ". Exception: " + e);
-                return;
-            }
-        }
-        final Collection<File> filter = FileUtils.listFiles(directory, null, true);
-        for (OozieDump oozieDump : OozieDump.values()) {
-            oozieDump.writeLogs(oozieClient, oozieLogLocation, filter);
-        }
-    }
-
-}
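
For context, a hedged sketch of how LogUtil.writeOozieLogs might be hooked into a TestNG teardown so Oozie bundle/coordinator/workflow info and logs get captured when a test fails; the class name, target directory and the way the ColoHelper is obtained are assumptions, while writeOozieLogs and the log.capture.oozie.skip_info / skip_log properties come from the file above.

import org.apache.falcon.regression.core.helpers.ColoHelper;
import org.testng.ITestResult;
import org.testng.annotations.AfterMethod;

public class OozieLogCaptureSketch {
    // In the real tests this would come from the configured server list.
    private ColoHelper cluster;

    @AfterMethod(alwaysRun = true)
    public void dumpOozieLogsOnFailure(ITestResult result) {
        if (!result.isSuccess()) {
            // Writes <jobId>-info.log and <jobId>.log files per bundle/coordinator/workflow,
            // skipping files already present under the target directory.
            LogUtil.writeOozieLogs(cluster, "target/oozie-logs"); // placeholder local directory
        }
    }
}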

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/MatrixUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/MatrixUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/MatrixUtil.java
deleted file mode 100644
index 14315b3..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/MatrixUtil.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.testng.Assert;
-
-import java.util.Arrays;
-
-/**
- * Util class for matrix operations.
- */
-public final class MatrixUtil {
-    private MatrixUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    /**
-     * Cross product many arrays.
-     * @param firstArray first array that you want to cross product
-     * @param otherArrays other arrays that you want to cross product
-     * @return cross product
-     */
-    public static Object[][] crossProduct(Object[] firstArray, Object[]... otherArrays) {
-        if (otherArrays == null || otherArrays.length == 0) {
-            Object[][] result = new Object[firstArray.length][1];
-            for (int i = 0; i < firstArray.length; ++i) {
-                result[i][0] = firstArray[i];
-            }
-            return result;
-        }
-        // computing cross product for the rest of the arrays
-        Object[][] restArray = new Object[otherArrays.length-1][];
-        System.arraycopy(otherArrays, 1, restArray, 0, otherArrays.length - 1);
-        Object[][] restCrossProduct = crossProduct(otherArrays[0], restArray);
-        //creating and initializing result array
-        Object[][] result = new Object[firstArray.length * restCrossProduct.length][];
-        for(int i = 0; i < result.length; ++i) {
-            result[i] = new Object[otherArrays.length + 1];
-        }
-        //doing the final cross product
-        for (int i = 0; i < firstArray.length; ++i) {
-            for (int j = 0; j < restCrossProduct.length; ++j) {
-                //computing one row of result
-                final int rowIdx = i * restCrossProduct.length + j;
-                result[rowIdx][0] = firstArray[i];
-                System.arraycopy(restCrossProduct[j], 0, result[rowIdx], 1, otherArrays.length);
-            }
-        }
-        return result;
-    }
-
-    public static Object[][] append(Object[][] arr1, Object[][] arr2) {
-        Assert.assertFalse(ArrayUtils.isEmpty(arr1), "arr1 can't be empty:"
-            + Arrays.deepToString(arr1));
-        Assert.assertFalse(ArrayUtils.isEmpty(arr2), "arr2 can't be empty:"
-            + Arrays.deepToString(arr2));
-        Assert.assertEquals(arr1[0].length, arr2[0].length, "Array rows are not compatible. "
-            + "row of first array: " + Arrays.deepToString(arr1[0])
-            + "row of second array: " + Arrays.deepToString(arr2[0]));
-        return (Object[][]) ArrayUtils.addAll(arr1, arr2);
-    }
-
-    /**
-     * Cross product many arrays.
-     * @param firstArray first array that you want to cross product
-     * @param otherArrays other arrays that you want to cross product
-     * @return cross product
-     */
-    public static Object[][] crossProductNew(Object[] firstArray, Object[][]... otherArrays) {
-        return crossProduct(firstArray, otherArrays);
-    }
-}
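
For context, MatrixUtil.crossProduct returns the Object[][] shape that TestNG data providers expect, so a plausible use (an assumption, not shown in this commit) looks like the sketch below; all names in it are illustrative.

import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

public class MatrixUtilUsageSketch {
    @DataProvider(name = "retentionCombinations")
    public Object[][] combinations() {
        // 3 units x 2 flags -> 6 rows, each row {unit, withData}
        return MatrixUtil.crossProduct(
                new String[]{"minutes", "hours", "days"},
                new Object[]{Boolean.TRUE, Boolean.FALSE});
    }

    @Test(dataProvider = "retentionCombinations")
    public void runOneCombination(String unit, boolean withData) {
        // Each invocation receives one row of the cross product.
    }
}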

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OSUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OSUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OSUtil.java
deleted file mode 100644
index 19f5f57..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OSUtil.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.io.FilenameUtils;
-
-/**
- * Util methods related to OS.
- */
-public final class OSUtil {
-    private OSUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    public static final boolean IS_WINDOWS = System.getProperty("os.name").toLowerCase().startsWith("windows");
-    public static final String WIN_SU_BINARY =
-            Config.getProperty("windows.su.binary", "ExecuteAs.exe");
-
-    private static final String SEPARATOR = System.getProperty("file.separator", "/");
-
-    public static final String RESOURCES = concat("src", "test", "resources");
-    public static final String RESOURCES_OOZIE = concat(RESOURCES, "oozie");
-    public static final String OOZIE_EXAMPLE_INPUT_DATA = concat(RESOURCES, "OozieExampleInputData");
-    public static final String NORMAL_INPUT = concat(OOZIE_EXAMPLE_INPUT_DATA, "normalInput");
-    public static final String SINGLE_FILE = concat(OOZIE_EXAMPLE_INPUT_DATA, "SingleFile");
-    public static final String OOZIE_COMBINED_ACTIONS = concat(RESOURCES, "combinedWorkflow");
-
-    public static final String OOZIE_LIB_FOLDER = concat(RESOURCES, "oozieLib");
-    public static final String MULTIPLE_ACTION_WORKFLOW = concat(RESOURCES, "MultipleActionWorkflow");
-    public static final String PIG_DIR = concat(RESOURCES, "pig");
-
-
-    public static String concat(String path1, String path2, String... pathParts) {
-        String path = FilenameUtils.concat(path1, path2);
-        for (String pathPart : pathParts) {
-            path = FilenameUtils.concat(path, pathPart);
-        }
-        return path;
-    }
-}
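
For context, a tiny illustration of OSUtil.concat, which goes through FilenameUtils so the same test code resolves paths correctly on both Linux and Windows hosts; the extra path parts below are placeholders.

public final class OSUtilUsageSketch {
    public static void main(String[] args) {
        // Normalised with the host separator: src/test/resources/workflows/aggregator on Linux,
        // the backslash form on Windows.
        String workflowDir = OSUtil.concat(OSUtil.RESOURCES, "workflows", "aggregator"); // placeholder parts
        System.out.println(workflowDir);
    }
}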


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OozieUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OozieUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OozieUtil.java
deleted file mode 100644
index 4c609b3..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/OozieUtil.java
+++ /dev/null
@@ -1,855 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.oozie.client.AuthOozieClient;
-import org.apache.oozie.client.BundleJob;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.CoordinatorJob;
-import org.apache.oozie.client.WorkflowAction;
-import org.apache.oozie.client.WorkflowJob;
-import org.joda.time.DateTime;
-import org.apache.log4j.Logger;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.json.JSONException;
-import org.testng.Assert;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * Helper methods for Oozie.
- */
-public final class OozieUtil {
-
-    public static final String FAIL_MSG = "NO_such_workflow_exists";
-    private OozieUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-
-    private static final Logger LOGGER = Logger.getLogger(OozieUtil.class);
-
-    public static AuthOozieClient getClient(String url) {
-        return new AuthOozieClient(url);
-    }
-
-    public static List<BundleJob> getBundles(OozieClient client, String filter, int start, int len)
-        throws OozieClientException {
-        LOGGER.info("Connecting to oozie: " + client.getOozieUrl());
-        return client.getBundleJobsInfo(filter, start, len);
-    }
-
-    public static List<String> getBundleIds(List<BundleJob> bundles) {
-        List<String> ids = new ArrayList<>();
-        for (BundleJob bundle : bundles) {
-            LOGGER.info("Bundle Id: " + bundle.getId());
-            ids.add(bundle.getId());
-        }
-        return ids;
-    }
-
-    public static List<Job.Status> getBundleStatuses(List<BundleJob> bundles) {
-        List<Job.Status> statuses = new ArrayList<>();
-        for (BundleJob bundle : bundles) {
-            LOGGER.info("bundle: " + bundle);
-            statuses.add(bundle.getStatus());
-        }
-        return statuses;
-    }
-
-    public static String getMaxId(List<String> ids) {
-        String oozieId = ids.get(0);
-        int maxInt = Integer.valueOf(oozieId.split("-")[0]);
-        for (int i = 1; i < ids.size(); i++) {
-            String currentId = ids.get(i);
-            int currInt = Integer.valueOf(currentId.split("-")[0]);
-            if (currInt > maxInt) {
-                maxInt = currInt;
-                oozieId = currentId;
-            }
-        }
-        return oozieId;
-    }
-
-    public static String getMinId(List<String> ids) {
-        String oozieId = ids.get(0);
-        int minInt = Integer.valueOf(oozieId.split("-")[0]);
-        for (int i = 1; i < ids.size(); i++) {
-            String currentId = ids.get(i);
-            int currInt = Integer.valueOf(currentId.split("-")[0]);
-            if (currInt < minInt) {
-                minInt = currInt;
-                oozieId = currentId;
-            }
-        }
-        return oozieId;
-    }
-
-    /**
-     * @param bundleID bundle number
-     * @param oozieClient oozie client
-     * @return list of action ids of the succeeded retention workflow
-     * @throws OozieClientException
-     */
-    public static List<String> waitForRetentionWorkflowToSucceed(String bundleID,
-                                                                 OozieClient oozieClient)
-        throws OozieClientException {
-        LOGGER.info("Connecting to oozie: " + oozieClient.getOozieUrl());
-        List<String> jobIds = new ArrayList<>();
-        LOGGER.info("using bundleId:" + bundleID);
-        waitForCoordinatorJobCreation(oozieClient, bundleID);
-        final String coordinatorId =
-            oozieClient.getBundleJobInfo(bundleID).getCoordinators().get(0).getId();
-        LOGGER.info("using coordinatorId: " + coordinatorId);
-
-        for (int i = 0;
-             i < 120 && oozieClient.getCoordJobInfo(coordinatorId).getActions().isEmpty(); ++i) {
-            TimeUtil.sleepSeconds(4);
-        }
-        Assert.assertFalse(oozieClient.getCoordJobInfo(coordinatorId).getActions().isEmpty(),
-            "Coordinator actions should have got created by now.");
-
-        CoordinatorAction action = oozieClient.getCoordJobInfo(coordinatorId).getActions().get(0);
-        for (int i = 0; i < 180; ++i) {
-            CoordinatorAction actionInfo = oozieClient.getCoordActionInfo(action.getId());
-            LOGGER.info("actionInfo: " + actionInfo);
-            if (EnumSet.of(CoordinatorAction.Status.SUCCEEDED, CoordinatorAction.Status.KILLED,
-                CoordinatorAction.Status.FAILED).contains(actionInfo.getStatus())) {
-                break;
-            }
-            TimeUtil.sleepSeconds(10);
-        }
-        Assert.assertEquals(
-            oozieClient.getCoordActionInfo(action.getId()).getStatus(),
-            CoordinatorAction.Status.SUCCEEDED,
-            "Action did not succeed.");
-        jobIds.add(action.getId());
-
-        return jobIds;
-    }
-
-    public static void waitForCoordinatorJobCreation(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        LOGGER.info("Connecting to oozie: " + oozieClient.getOozieUrl());
-        for (int i = 0;
-             i < 60 && oozieClient.getBundleJobInfo(bundleID).getCoordinators().isEmpty(); ++i) {
-            TimeUtil.sleepSeconds(2);
-        }
-        Assert.assertFalse(oozieClient.getBundleJobInfo(bundleID).getCoordinators().isEmpty(),
-            "Coordinator job should have got created by now.");
-    }
-
-    public static Job.Status getOozieJobStatus(OozieClient client, String processName,
-                                               EntityType entityType)
-        throws OozieClientException {
-        String filter = String.format("name=FALCON_%s_%s", entityType, processName);
-        List<Job.Status> statuses = getBundleStatuses(getBundles(client, filter, 0, 10));
-        if (statuses.isEmpty()) {
-            return null;
-        } else {
-            return statuses.get(0);
-        }
-    }
-
-    public static List<String> getBundles(OozieClient client, String entityName,
-                                          EntityType entityType)
-        throws OozieClientException {
-        String filter = "name=FALCON_" + entityType + "_" + entityName;
-        return getBundleIds(getBundles(client, filter, 0, 10));
-    }
-
-    public static List<DateTime> getStartTimeForRunningCoordinators(ColoHelper prismHelper,
-                                                                    String bundleID)
-        throws OozieClientException {
-        List<DateTime> startTimes = new ArrayList<>();
-
-        OozieClient oozieClient = prismHelper.getClusterHelper().getOozieClient();
-        BundleJob bundleJob = oozieClient.getBundleJobInfo(bundleID);
-        CoordinatorJob jobInfo;
-
-
-        for (CoordinatorJob job : bundleJob.getCoordinators()) {
-
-            if (job.getAppName().contains("DEFAULT")) {
-                jobInfo = oozieClient.getCoordJobInfo(job.getId());
-                for (CoordinatorAction action : jobInfo.getActions()) {
-                    DateTime temp = new DateTime(action.getCreatedTime(), DateTimeZone.UTC);
-                    LOGGER.info(temp);
-                    startTimes.add(temp);
-                }
-            }
-
-            Collections.sort(startTimes);
-
-            if (!(startTimes.isEmpty())) {
-                return startTimes;
-            }
-        }
-
-        return null;
-    }
-
-    public static boolean verifyOozieJobStatus(OozieClient client, String processName,
-                                               EntityType entityType, Job.Status expectedStatus)
-        throws OozieClientException {
-        for (int seconds = 0; seconds < 100; seconds+=5) {
-            Job.Status status = getOozieJobStatus(client, processName, entityType);
-            LOGGER.info("Current status: " + status);
-            if (status == expectedStatus) {
-                return true;
-            }
-            TimeUtil.sleepSeconds(5);
-        }
-        return false;
-    }
-
-
-    public static List<String> getMissingDependencies(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        CoordinatorJob jobInfo;
-        jobInfo = null;
-        BundleJob bundleJob = oozieClient.getBundleJobInfo(bundleID);
-        List<CoordinatorJob> coordinatorJobList = bundleJob.getCoordinators();
-        if (coordinatorJobList.size() > 1) {
-
-            for (CoordinatorJob coord : bundleJob.getCoordinators()) {
-                LOGGER.info("Appname is : " + coord.getAppName());
-                if ((coord.getAppName().contains("DEFAULT")
-                        && coord.getAppName().contains("PROCESS"))
-                    ||
-                    (coord.getAppName().contains("REPLICATION")
-                        && coord.getAppName().contains("FEED"))) {
-                    jobInfo = oozieClient.getCoordJobInfo(coord.getId());
-                } else {
-                    LOGGER.info("Desired coord does not exists on " + oozieClient.getOozieUrl());
-                }
-            }
-
-        } else {
-            jobInfo = oozieClient.getCoordJobInfo(bundleJob.getCoordinators().get(0).getId());
-        }
-
-        LOGGER.info("Coordinator id : " + jobInfo);
-        List<CoordinatorAction> actions = null;
-        if (jobInfo != null) {
-            actions = jobInfo.getActions();
-        }
-
-        if (actions != null) {
-            if (actions.size() < 1) {
-                return null;
-            }
-        }
-        if (actions != null) {
-            LOGGER.info("conf from event: " + actions.get(0).getMissingDependencies());
-        }
-
-        String[] missingDependencies = new String[0];
-        if (actions != null) {
-            missingDependencies = actions.get(0).getMissingDependencies().split("#");
-        }
-        return new ArrayList<>(Arrays.asList(missingDependencies));
-    }
-
-    public static List<String> getWorkflowJobs(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        waitForCoordinatorJobCreation(oozieClient, bundleID);
-        List<String> workflowIds = new ArrayList<>();
-        List<CoordinatorJob> coordJobs = oozieClient.getBundleJobInfo(bundleID).getCoordinators();
-        CoordinatorJob coordJobInfo = oozieClient.getCoordJobInfo(coordJobs.get(0).getId());
-
-        for (CoordinatorAction action : coordJobInfo.getActions()) {
-            workflowIds.add(action.getExternalId());
-        }
-        return workflowIds;
-    }
-
-    public static List<String> getWorkflow(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        waitForCoordinatorJobCreation(oozieClient, bundleID);
-        List<String> workflowIds = new ArrayList<>();
-        String coordId = getDefaultCoordIDFromBundle(oozieClient, bundleID);
-        CoordinatorJob coordJobInfo = oozieClient.getCoordJobInfo(coordId);
-        for (CoordinatorAction action : coordJobInfo.getActions()) {
-            if (action.getStatus().name().equals("RUNNING") || action.getStatus().name().equals("SUCCEEDED")) {
-                workflowIds.add(action.getExternalId());
-            }
-            if (action.getStatus().name().equals("KILLED") || action.getStatus().name().equals("WAITING")) {
-                Assert.assertNull(action.getExternalId());
-            }
-        }
-        return workflowIds;
-    }
-
-    public static Date getNominalTime(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        BundleJob bundleJob = oozieClient.getBundleJobInfo(bundleID);
-        CoordinatorJob jobInfo =
-            oozieClient.getCoordJobInfo(bundleJob.getCoordinators().get(0).getId());
-        List<CoordinatorAction> actions = jobInfo.getActions();
-        return actions.get(0).getNominalTime();
-    }
-
-    public static CoordinatorJob getDefaultOozieCoord(OozieClient oozieClient, String bundleId,
-                                                      EntityType type)
-        throws OozieClientException {
-        BundleJob bundlejob = oozieClient.getBundleJobInfo(bundleId);
-        for (CoordinatorJob coord : bundlejob.getCoordinators()) {
-            if ((coord.getAppName().contains("DEFAULT") && EntityType.PROCESS == type)
-                    ||
-                (coord.getAppName().contains("REPLICATION") && EntityType.FEED == type)) {
-                return oozieClient.getCoordJobInfo(coord.getId());
-            } else {
-                LOGGER.info("Desired coord does not exists on " + oozieClient.getOozieUrl());
-            }
-        }
-        return null;
-    }
-
-    public static int getNumberOfWorkflowInstances(OozieClient oozieClient, String bundleId)
-        throws OozieClientException {
-        return getDefaultOozieCoord(oozieClient, bundleId, EntityType.PROCESS).getActions().size();
-    }
-
-    public static List<String> getActionsNominalTime(OozieClient oozieClient,
-                                                     String bundleId, EntityType type)
-        throws OozieClientException {
-        Map<Date, CoordinatorAction.Status> actions = getActionsNominalTimeAndStatus(oozieClient, bundleId, type);
-        List<String> nominalTime = new ArrayList<>();
-        for (Date date : actions.keySet()) {
-            nominalTime.add(date.toString());
-        }
-        return nominalTime;
-    }
-
-    public static Map<Date, CoordinatorAction.Status> getActionsNominalTimeAndStatus(OozieClient oozieClient,
-            String bundleId, EntityType type) throws OozieClientException {
-        Map<Date, CoordinatorAction.Status> result = new TreeMap<>();
-        List<CoordinatorAction> actions = getDefaultOozieCoord(oozieClient, bundleId, type).getActions();
-        for (CoordinatorAction action : actions) {
-            result.put(action.getNominalTime(), action.getStatus());
-        }
-        return result;
-    }
-
-    public static boolean isBundleOver(ColoHelper coloHelper, String bundleId)
-        throws OozieClientException {
-        OozieClient client = coloHelper.getClusterHelper().getOozieClient();
-
-        BundleJob bundleJob = client.getBundleJobInfo(bundleId);
-
-        if (EnumSet.of(BundleJob.Status.DONEWITHERROR, BundleJob.Status.FAILED,
-            BundleJob.Status.SUCCEEDED, BundleJob.Status.KILLED).contains(bundleJob.getStatus())) {
-            return true;
-        }
-        TimeUtil.sleepSeconds(20);
-        return false;
-    }
-
-    public static void verifyNewBundleCreation(OozieClient oozieClient, String originalBundleId,
-                                               List<String> initialNominalTimes, String entity,
-                                               boolean shouldBeCreated, boolean matchInstances)
-        throws OozieClientException {
-        String entityName = Util.readEntityName(entity);
-        EntityType entityType = Util.getEntityType(entity);
-        String newBundleId = getLatestBundleID(oozieClient, entityName, entityType);
-        if (shouldBeCreated) {
-            Assert.assertTrue(!newBundleId.equalsIgnoreCase(originalBundleId),
-                "eeks! new bundle is not getting created!!!!");
-            LOGGER.info("old bundleId=" + originalBundleId + " on oozie: " + oozieClient);
-            LOGGER.info("new bundleId=" + newBundleId + " on oozie: " + oozieClient);
-            if (matchInstances) {
-                validateNumberOfWorkflowInstances(oozieClient,
-                        initialNominalTimes, originalBundleId, newBundleId, entityType);
-            }
-        } else {
-            Assert.assertEquals(newBundleId, originalBundleId, "eeks! new bundle is getting created!!!!");
-        }
-    }
-
-    private static void validateNumberOfWorkflowInstances(OozieClient oozieClient,
-                                                          List<String> initialNominalTimes,
-                                                          String originalBundleId,
-                                                          String newBundleId, EntityType type)
-        throws OozieClientException {
-        List<String> nominalTimesOriginalAndNew = getActionsNominalTime(oozieClient, originalBundleId, type);
-        nominalTimesOriginalAndNew.addAll(getActionsNominalTime(oozieClient, newBundleId, type));
-        initialNominalTimes.removeAll(nominalTimesOriginalAndNew);
-        if (initialNominalTimes.size() != 0) {
-            LOGGER.info("Missing instance are : " + initialNominalTimes);
-            LOGGER.debug("Original Bundle ID   : " + originalBundleId);
-            LOGGER.debug("New Bundle ID        : " + newBundleId);
-            Assert.fail("some instances have gone missing after update");
-        }
-    }
-
-    public static String getCoordStartTime(OozieClient oozieClient, String entity, int bundleNo)
-        throws OozieClientException {
-        String bundleID = getSequenceBundleID(oozieClient,
-            Util.readEntityName(entity), Util.getEntityType(entity), bundleNo);
-        CoordinatorJob coord = getDefaultOozieCoord(oozieClient, bundleID,
-            Util.getEntityType(entity));
-        return TimeUtil.dateToOozieDate(coord.getStartTime());
-    }
-
-    public static DateTimeFormatter getOozieDateTimeFormatter() {
-        return DateTimeFormat.forPattern("yyyy'-'MM'-'dd'T'HH':'mm'Z'");
-    }
-
-    public static int getNumberOfBundle(OozieClient oozieClient, EntityType type, String entityName)
-        throws OozieClientException {
-        return OozieUtil.getBundles(oozieClient, entityName, type).size();
-    }
-
-    public static void createMissingDependencies(ColoHelper helper, EntityType type,
-                                                 String entityName, int bundleNumber,
-                                                 int instanceNumber)
-        throws OozieClientException, IOException {
-        final OozieClient oozieClient = helper.getClusterHelper().getOozieClient();
-        String bundleID = getSequenceBundleID(oozieClient, entityName, type, bundleNumber);
-        List<CoordinatorJob> coords = oozieClient.getBundleJobInfo(bundleID).getCoordinators();
-        final List<String> missingDependencies = getMissingDependenciesForInstance(oozieClient, coords, instanceNumber);
-        HadoopUtil.createFolders(helper.getClusterHelper().getHadoopFS(), helper.getPrefix(), missingDependencies);
-    }
-
-    private static List<String> getMissingDependenciesForInstance(OozieClient oozieClient,
-            List<CoordinatorJob> coords, int instanceNumber)
-        throws OozieClientException {
-        ArrayList<String> missingPaths = new ArrayList<>();
-        for (CoordinatorJob coord : coords) {
-            CoordinatorJob temp = oozieClient.getCoordJobInfo(coord.getId());
-            CoordinatorAction instance = temp.getActions().get(instanceNumber);
-            missingPaths.addAll(Arrays.asList(instance.getMissingDependencies().split("#")));
-        }
-        return missingPaths;
-    }
-
-    public static List<List<String>> createMissingDependencies(ColoHelper helper, EntityType type,
-                                                 String entityName, int bundleNumber)
-        throws OozieClientException, IOException {
-        final OozieClient oozieClient = helper.getClusterHelper().getOozieClient();
-        String bundleID = getSequenceBundleID(oozieClient, entityName, type, bundleNumber);
-        return createMissingDependenciesForBundle(helper, bundleID);
-    }
-
-    public static List<List<String>> createMissingDependenciesForBundle(ColoHelper helper, String bundleId)
-        throws OozieClientException, IOException {
-        OozieClient oozieClient = helper.getClusterHelper().getOozieClient();
-        List<CoordinatorJob> coords = oozieClient.getBundleJobInfo(bundleId).getCoordinators();
-        List<List<String>> missingDependencies = getMissingDependenciesForBundle(oozieClient, coords);
-        for (List<String> missingDependencyPerInstance : missingDependencies) {
-            HadoopUtil.createFolders(helper.getClusterHelper().getHadoopFS(), helper.getPrefix(),
-                missingDependencyPerInstance);
-        }
-        return missingDependencies;
-    }
-
-    private static List<List<String>> getMissingDependenciesForBundle(OozieClient oozieClient,
-                                                                      List<CoordinatorJob> coords)
-        throws OozieClientException, IOException {
-        List<List<String>> missingDependencies = new ArrayList<>();
-        for (CoordinatorJob coord : coords) {
-            CoordinatorJob temp = oozieClient.getCoordJobInfo(coord.getId());
-            for (int instanceNumber = 0; instanceNumber < temp.getActions().size();
-                 instanceNumber++) {
-                CoordinatorAction instance = temp.getActions().get(instanceNumber);
-                missingDependencies.add(Arrays.asList(instance.getMissingDependencies().split("#")));
-            }
-        }
-        return missingDependencies;
-    }
-
-    public static void validateRetryAttempts(OozieClient oozieClient, String bundleId, EntityType type,
-                                             int attempts) throws OozieClientException {
-        CoordinatorJob coord = getDefaultOozieCoord(oozieClient, bundleId, type);
-        int actualRun = oozieClient.getJobInfo(coord.getActions().get(0).getExternalId()).getRun();
-        LOGGER.info("Actual run count: " + actualRun); // wrt 0
-        Assert.assertEquals(actualRun, attempts, "Rerun attempts did not match");
-    }
-
-    /**
-     * Try to find feed coordinators of given type.
-     */
-    public static int checkIfFeedCoordExist(OozieClient oozieClient,
-                                            String feedName, String coordType) throws OozieClientException {
-        return checkIfFeedCoordExist(oozieClient, feedName, coordType, 5);
-    }
-
-    /**
-     * Try to find feed coordinators of the given type, retrying up to the given number of times.
-     */
-    public static int checkIfFeedCoordExist(OozieClient oozieClient,
-            String feedName, String coordType, int numberOfRetries) throws OozieClientException {
-        LOGGER.info("feedName: " + feedName);
-        for (int retryAttempt = 0; retryAttempt < numberOfRetries; retryAttempt++) {
-            int numberOfCoord = 0;
-            List<String> bundleIds = getBundles(oozieClient, feedName, EntityType.FEED);
-            if (bundleIds.size() == 0) {
-                TimeUtil.sleepSeconds(4);
-                continue;
-            }
-            LOGGER.info("bundleIds: " + bundleIds);
-            for (String aBundleId : bundleIds) {
-                LOGGER.info("aBundleId: " + aBundleId);
-                waitForCoordinatorJobCreation(oozieClient, aBundleId);
-                List<CoordinatorJob> coords = getBundleCoordinators(oozieClient, aBundleId);
-                LOGGER.info("coords: " + coords);
-                for (CoordinatorJob coord : coords) {
-                    if (coord.getAppName().contains(coordType)) {
-                        numberOfCoord++;
-                    }
-                }
-            }
-            if (numberOfCoord > 0) {
-                return numberOfCoord;
-            }
-            TimeUtil.sleepSeconds(4);
-        }
-        return 0;
-    }
-
-    /**
-     * Retrieves replication coordinatorID from bundle of coordinators.
-     */
-    public static List<String> getReplicationCoordID(String bundleId, AbstractEntityHelper helper)
-        throws OozieClientException {
-        final OozieClient oozieClient = helper.getOozieClient();
-        List<CoordinatorJob> coords = getBundleCoordinators(oozieClient, bundleId);
-        List<String> replicationCoordID = new ArrayList<>();
-        for (CoordinatorJob coord : coords) {
-            if (coord.getAppName().contains("FEED_REPLICATION")) {
-                replicationCoordID.add(coord.getId());
-            }
-        }
-        return replicationCoordID;
-    }
-
-    /**
-     * Retrieves ID of bundle related to some process/feed using its ordinal number.
-     *
-     * @param entityName   - name of entity bundle is related to
-     * @param entityType   - feed or process
-     * @param bundleNumber - ordinal number of bundle
-     * @return bundle ID
-     * @throws org.apache.oozie.client.OozieClientException
-     */
-    public static String getSequenceBundleID(OozieClient oozieClient, String entityName,
-            EntityType entityType, int bundleNumber) throws OozieClientException {
-        //sequence start from 0
-        List<String> bundleIds = getBundles(oozieClient, entityName, entityType);
-        Collections.sort(bundleIds);
-        if (bundleNumber < bundleIds.size()) {
-            return bundleIds.get(bundleNumber);
-        }
-        return null;
-    }
-
-    /**
-     * Retrieves the latest bundle ID.
-     *
-     * @param oozieClient where job is running
-     * @param entityName  name of entity job is related to
-     * @param entityType  type of entity - feed or process expected
-     * @return latest bundle ID
-     * @throws org.apache.oozie.client.OozieClientException
-     */
-    public static String getLatestBundleID(OozieClient oozieClient,
-            String entityName, EntityType entityType) throws OozieClientException {
-        List<String> bundleIds = getBundles(oozieClient, entityName, entityType);
-        String max = "0";
-        int maxID = -1;
-        for (String strID : bundleIds) {
-            if (maxID < Integer.parseInt(strID.substring(0, strID.indexOf('-')))) {
-                maxID = Integer.parseInt(strID.substring(0, strID.indexOf('-')));
-                max = strID;
-            }
-        }
-        return max;
-    }
-
-    /**
-     * Retrieves all coordinators of bundle.
-     *
-     * @param oozieClient Oozie client to use for fetching info.
-     * @param bundleID specific bundle ID
-     * @return list of bundle coordinators
-     * @throws org.apache.oozie.client.OozieClientException
-     */
-    public static List<CoordinatorJob> getBundleCoordinators(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        BundleJob bundleInfo = oozieClient.getBundleJobInfo(bundleID);
-        return bundleInfo.getCoordinators();
-    }
-
-    public static Job.Status getDefaultCoordinatorStatus(OozieClient oozieClient, String processName,
-            int bundleNumber) throws OozieClientException {
-        String bundleID = getSequenceBundleID(oozieClient, processName, EntityType.PROCESS, bundleNumber);
-        String coordId = getDefaultCoordIDFromBundle(oozieClient, bundleID);
-        return oozieClient.getCoordJobInfo(coordId).getStatus();
-    }
-
-    public static String getDefaultCoordIDFromBundle(OozieClient oozieClient, String bundleId)
-        throws OozieClientException {
-        waitForCoordinatorJobCreation(oozieClient, bundleId);
-        BundleJob bundleInfo = oozieClient.getBundleJobInfo(bundleId);
-        List<CoordinatorJob> coords = bundleInfo.getCoordinators();
-        int min = 100000;
-        String minString = "";
-        for (CoordinatorJob coord : coords) {
-            String strID = coord.getId();
-            if (min > Integer.parseInt(strID.substring(0, strID.indexOf('-')))) {
-                min = Integer.parseInt(strID.substring(0, strID.indexOf('-')));
-                minString = coord.getId();
-            }
-        }
-        LOGGER.info("function getDefaultCoordIDFromBundle: minString: " + minString);
-        return minString;
-    }
-
-    public static String getLatestCoordinatorID(OozieClient oozieClient, String processName,
-            EntityType entityType) throws OozieClientException {
-        final String latestBundleID = getLatestBundleID(oozieClient, processName, entityType);
-        return getDefaultCoordIDFromBundle(oozieClient, latestBundleID);
-    }
-
-    /**
-     * Waits until the bundle job reaches the expected status.
-     * The wait timeout is derived from the expected status.
-     *
-     * @param oozieClient    oozie client of the cluster the job is running on
-     * @param processName    name of the process whose job is being analyzed
-     * @param expectedStatus job status we are waiting for
-     * @throws org.apache.oozie.client.OozieClientException
-     */
-    public static void waitForBundleToReachState(OozieClient oozieClient,
-            String processName, Job.Status expectedStatus) throws OozieClientException {
-        int totalMinutesToWait = getMinutesToWait(expectedStatus);
-        waitForBundleToReachState(oozieClient, processName, expectedStatus, totalMinutesToWait);
-    }
-
-    /**
-     * Waits until the bundle job reaches the expected status within the given time.
-     * Use it directly in test cases whose timeouts differ from the default ones; otherwise use
-     * waitForBundleToReachState(OozieClient, String, Status).
-     *
-     * @param oozieClient        oozie client of the cluster the job is running on
-     * @param processName        name of the process whose job is being analyzed
-     * @param expectedStatus     job status we are waiting for
-     * @param totalMinutesToWait maximum time in minutes to wait for the expected state
-     * @throws org.apache.oozie.client.OozieClientException
-     */
-    public static void waitForBundleToReachState(OozieClient oozieClient, String processName,
-            Job.Status expectedStatus, int totalMinutesToWait) throws OozieClientException {
-        int sleep = totalMinutesToWait * 60 / 20;
-        for (int sleepCount = 0; sleepCount < sleep; sleepCount++) {
-            String bundleID =
-                    getLatestBundleID(oozieClient, processName, EntityType.PROCESS);
-            BundleJob j = oozieClient.getBundleJobInfo(bundleID);
-            LOGGER.info(sleepCount + ". Current status: " + j.getStatus()
-                + "; expected: " + expectedStatus);
-            if (j.getStatus() == expectedStatus) {
-                return;
-            }
-            TimeUtil.sleepSeconds(20);
-        }
-        Assert.fail("State " + expectedStatus + " wasn't reached in " + totalMinutesToWait + " mins");
-    }
-
-    /**
-     * Estimates the time presumably needed for a bundle job to reach a particular state.
-     *
-     * @param expectedStatus status which we expect the bundle job to reach
-     * @return minutes to wait for expected status
-     */
-    private static int getMinutesToWait(Job.Status expectedStatus) {
-        switch (expectedStatus) {
-        case DONEWITHERROR:
-        case SUCCEEDED:
-            return OSUtil.IS_WINDOWS ? 40 : 20;
-        case KILLED:
-            return OSUtil.IS_WINDOWS ? 30 : 15;
-        default:
-            return OSUtil.IS_WINDOWS ? 60 : 30;
-        }
-    }
-
-    public static String getActionStatus(OozieClient oozieClient, String workflowId, String actionName)
-        throws OozieClientException {
-        List<WorkflowAction> wfAction = oozieClient.getJobInfo(workflowId).getActions();
-        for (WorkflowAction wf : wfAction) {
-            if (wf.getName().contains(actionName)) {
-                return wf.getExternalStatus();
-            }
-        }
-        return "";
-    }
-
-    public static String getWorkflowActionStatus(OozieClient oozieClient, String bundleId, String actionName)
-        throws OozieClientException {
-        List<String> workflowIds = getWorkflowJobs(oozieClient, bundleId);
-        if (workflowIds.get(0).isEmpty()) {
-            return FAIL_MSG;
-        }
-        return getActionStatus(oozieClient, workflowIds.get(0), actionName);
-    }
-
-    public static String getSubWorkflowActionStatus(OozieClient oozieClient, String bundleId,
-                                                    String actionName, String subAction)
-        throws OozieClientException {
-        List<String> workflowIds = getWorkflowJobs(oozieClient, bundleId);
-        if (workflowIds.get(0).isEmpty()) {
-            return FAIL_MSG;
-        }
-
-        String wid="";
-        List<WorkflowAction> wfAction = oozieClient.getJobInfo(workflowIds.get(0)).getActions();
-        for (WorkflowAction wf : wfAction) {
-            if (wf.getName().contains(actionName)) {
-                wid = wf.getExternalId();
-            }
-        }
-
-        if (!wid.isEmpty()) {
-            return getActionStatus(oozieClient, wid, subAction);
-        }
-        return FAIL_MSG;
-    }
-
-    /**
-     * Returns configuration object of a given bundleID for a given instanceTime.
-     *
-     * @param oozieClient  oozie client of cluster job is running on
-     * @param bundleID     bundleID of given cluster
-     * @param time         instanceTime
-     * @throws org.apache.oozie.client.OozieClientException
-     * @throws org.json.JSONException
-     */
-    public static Configuration getProcessConf(OozieClient oozieClient, String bundleID, String time)
-        throws OozieClientException, JSONException {
-        waitForCoordinatorJobCreation(oozieClient, bundleID);
-        List<CoordinatorJob> coordJobs = oozieClient.getBundleJobInfo(bundleID).getCoordinators();
-        CoordinatorJob coordJobInfo = oozieClient.getCoordJobInfo(coordJobs.get(0).getId());
-
-        Configuration conf = new Configuration();
-        for (CoordinatorAction action : coordJobInfo.getActions()) {
-            String dateStr = (new DateTime(action.getNominalTime(), DateTimeZone.UTC)).toString();
-            if (!dateStr.isEmpty() && dateStr.contains(time.replace("Z", ""))) {
-                conf.addResource(new ByteArrayInputStream(oozieClient.getJobInfo(action.getExternalId()).
-                        getConf().getBytes()));
-            }
-        }
-        return conf;
-    }
-
-    /**
-     * Retrieves and parses the workflow definition of a replication coordinator action and checks whether
-     * specific properties are present in the list of workflow args.
-     * @param workflowDefinition workflow definition
-     * @param actionName action within the workflow, e.g. pre-processing, replication, etc.
-     * @param propMap specific properties which are expected to be in arg list
-     * @return true if all keys and values are present, false otherwise
-     */
-    public static boolean propsArePresentInWorkflow(String workflowDefinition, String actionName,
-                                              HashMap<String, String> propMap) {
-        //get action definition
-        Document definition = Util.convertStringToDocument(workflowDefinition);
-        Assert.assertNotNull(definition, "Workflow definition shouldn't be null.");
-        NodeList actions = definition.getElementsByTagName("action");
-        Element action = null;
-        for (int i = 0; i < actions.getLength(); i++) {
-            Node node = actions.item(i);
-            if (node.getNodeType() == Node.ELEMENT_NODE) {
-                action = (Element) node;
-                if (action.getAttribute("name").equals(actionName)) {
-                    break;
-                }
-                action = null;
-            }
-        }
-        Assert.assertNotNull(action, actionName + " action not found.");
-
-        //retrieve workflow args and check whether the expected properties are present
-        Element javaElement = (Element) action.getElementsByTagName("java").item(0);
-        NodeList args = javaElement.getElementsByTagName("arg");
-        int counter = 0;
-        String key = null;
-        for (int i = 0; i < args.getLength(); i++) {
-            Node node = args.item(i);
-            if (node.getNodeType() == Node.ELEMENT_NODE) {
-                String argKey = node.getTextContent().replace("-", "");
-                if (key != null && propMap.get(key).equals(argKey)) {
-                    counter++;
-                    key = null;
-                } else if (key == null && propMap.containsKey(argKey)) {
-                    key = argKey;
-                }
-            }
-        }
-        return counter == propMap.size();
-    }
-
-    /**
-     * Returns the configuration object of the retention coordinator of a given bundleID.
-     *
-     * @param oozieClient  oozie client of cluster job is running on
-     * @param bundleID     bundleID of given cluster
-     * @throws OozieClientException
-     */
-    public static Configuration getRetentionConfiguration(OozieClient oozieClient, String bundleID)
-        throws OozieClientException {
-        waitForCoordinatorJobCreation(oozieClient, bundleID);
-        CoordinatorJob coord = null;
-        List<CoordinatorJob> coordJobs = oozieClient.getBundleJobInfo(bundleID).getCoordinators();
-        for (CoordinatorJob coordinatorJob : coordJobs) {
-            if (coordinatorJob.getAppName().startsWith("FALCON_FEED_RETENTION")) {
-                coord = oozieClient.getCoordJobInfo(coordinatorJob.getId());
-            }
-        }
-
-        Configuration configuration = new Configuration();
-        if (coord != null) {
-            WorkflowJob wid = oozieClient.getJobInfo(coord.getActions().get(0).getExternalId());
-            configuration.addResource(new ByteArrayInputStream(wid.getConf().getBytes()));
-        } else {
-            configuration = null;
-        }
-
-        return configuration;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/TimeUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/TimeUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/TimeUtil.java
deleted file mode 100644
index 292a516..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/TimeUtil.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.falcon.regression.core.enumsAndConstants.FreqType;
-import org.apache.log4j.Logger;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-
-/**
-All time/date related util methods for merlin. Pending item: move time related methods from
-InstanceUtil to here.
- */
-
-public final class TimeUtil {
-
-    private TimeUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(TimeUtil.class);
-
-    public static void sleepSeconds(double seconds) {
-        long ms = (long) (seconds * 1000);
-        try {
-            TimeUnit.MILLISECONDS.sleep(ms);
-        } catch (InterruptedException e) {
-            LOGGER.info("Sleep was interrupted");
-        }
-    }
-
-    public static String get20roundedTime(String oozieBaseTime) {
-        DateTime startTime =
-            new DateTime(oozieDateToDate(oozieBaseTime), DateTimeZone.UTC);
-        if (startTime.getMinuteOfHour() < 20) {
-            startTime = startTime.minusMinutes(startTime.getMinuteOfHour());
-        } else if (startTime.getMinuteOfHour() < 40) {
-            startTime = startTime.minusMinutes(startTime.getMinuteOfHour() + 20);
-        } else {
-            startTime = startTime.minusMinutes(startTime.getMinuteOfHour() + 40);
-        }
-        return dateToOozieDate(startTime.toDate());
-    }
-
-    public static List<String> getMinuteDatesOnEitherSide(int interval, int minuteSkip) {
-        DateTime today = new DateTime(DateTimeZone.UTC);
-        LOGGER.info("today is: " + today.toString());
-        return getMinuteDatesOnEitherSide(today.minusMinutes(interval),
-            today.plusMinutes(interval), minuteSkip);
-    }
-
-    public static List<String> getMinuteDatesOnEitherSide(String startOozieDate, String endOozieDate,
-                                                          int minuteSkip) {
-        DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd/HH/mm");
-        formatter.withZoneUTC();
-        return getMinuteDatesOnEitherSide(TimeUtil.oozieDateToDate(startOozieDate),
-            TimeUtil.oozieDateToDate(endOozieDate), minuteSkip, formatter);
-    }
-
-    public static List<String> getMinuteDatesOnEitherSide(DateTime startDate, DateTime endDate,
-                                                          int minuteSkip) {
-        DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd/HH/mm");
-        formatter.withZoneUTC();
-        return getMinuteDatesOnEitherSide(startDate, endDate, minuteSkip, formatter);
-    }
-
-    public static List<String> getMinuteDatesOnEitherSide(String startOozieDate, String endOozieDate,
-                                                          int minuteSkip,
-                                                          DateTimeFormatter formatter) {
-        return getMinuteDatesOnEitherSide(TimeUtil.oozieDateToDate(startOozieDate),
-            TimeUtil.oozieDateToDate(endOozieDate), minuteSkip, formatter);
-    }
-
-    public static List<String> getMinuteDatesOnEitherSide(DateTime startDate, DateTime endDate,
-                                                          int minuteSkip,
-                                                          DateTimeFormatter formatter) {
-        LOGGER.info("generating data between " + formatter.print(startDate) + " and "
-                + formatter.print(endDate));
-        if (minuteSkip == 0) {
-            minuteSkip = 1;
-        }
-        List<String> dates = new ArrayList<>();
-        while (!startDate.isAfter(endDate)) {
-            dates.add(formatter.print(startDate));
-            startDate = startDate.plusMinutes(minuteSkip);
-        }
-        return dates;
-    }
-
-    /**
-     * Converts a list of dates to a list of strings according to the supplied format.
-     *
-     * @param dates        list of dates
-     * @param formatter formatter to be used for converting dates
-     * @return list of strings corresponding to given dates
-     */
-    public static List<String> convertDatesToString(List<DateTime> dates,
-                                                    DateTimeFormatter formatter) {
-        List<String> dateString = new ArrayList<>();
-        formatter.withZoneUTC();
-        for (DateTime date : dates) {
-            dateString.add(formatter.print(date));
-        }
-        return dateString;
-    }
-
-    /**
-     * Get all possible dates between start and end date; the gap between subsequent dates
-     * is one unit of freqType.
-     *
-     * @param startDate start date
-     * @param endDate   end date
-     * @param freqType  type of the feed
-     * @return list of dates
-     */
-    public static List<DateTime> getDatesOnEitherSide(DateTime startDate, DateTime endDate,
-                                                      FreqType freqType) {
-        return getDatesOnEitherSide(startDate, endDate, 1, freqType);
-    }
-
-    /**
-     * Get all possible dates between start and end date; the gap between subsequent dates
-     * is skip units of freqType.
-     *
-     * @param startDate start date
-     * @param endDate   end date
-     * @param skip      number of freqType units between subsequent dates
-     * @param freqType  type of the feed
-     * @return list of dates
-     */
-    public static List<DateTime> getDatesOnEitherSide(DateTime startDate, DateTime endDate,
-                                                      int skip, FreqType freqType) {
-        final List<DateTime> dates = new ArrayList<>();
-        if (!startDate.isAfter(endDate)) {
-            dates.add(startDate);
-        }
-        for (int counter = 0; !startDate.isAfter(endDate) && counter < 1000; ++counter) {
-            startDate = freqType.addTime(startDate, skip);
-            dates.add(startDate);
-        }
-        return dates;
-    }
-
-    public static String getTimeWrtSystemTime(int minutes) {
-
-        DateTime jodaTime = new DateTime(DateTimeZone.UTC);
-        if (minutes > 0) {
-            jodaTime = jodaTime.plusMinutes(minutes);
-        } else {
-            jodaTime = jodaTime.minusMinutes(-1 * minutes);
-        }
-        DateTimeFormatter fmt = OozieUtil.getOozieDateTimeFormatter();
-        DateTimeZone tz = DateTimeZone.getDefault();
-        return fmt.print(tz.convertLocalToUTC(jodaTime.getMillis(), false));
-    }
-
-    public static String addMinsToTime(String time, int minutes) {
-
-        DateTimeFormatter fmt = OozieUtil.getOozieDateTimeFormatter();
-        DateTime jodaTime = fmt.parseDateTime(time);
-        jodaTime = jodaTime.plusMinutes(minutes);
-        return fmt.print(jodaTime);
-    }
-
-    public static DateTime oozieDateToDate(String time) {
-        DateTimeFormatter fmt = OozieUtil.getOozieDateTimeFormatter();
-        fmt = fmt.withZoneUTC();
-        return fmt.parseDateTime(time);
-    }
-
-    public static String dateToOozieDate(Date dt) {
-
-        DateTime jodaTime = new DateTime(dt, DateTimeZone.UTC);
-        LOGGER.info("SystemTime: " + jodaTime);
-        DateTimeFormatter fmt = OozieUtil.getOozieDateTimeFormatter();
-        return fmt.print(jodaTime);
-    }
-
-    public static void sleepTill(String startTimeOfLateCoord) {
-
-        DateTime finalDate = new DateTime(oozieDateToDate(startTimeOfLateCoord));
-        while (true) {
-            DateTime sysDate = oozieDateToDate(getTimeWrtSystemTime(0));
-            LOGGER.info("sysDate: " + sysDate + "  finalDate: " + finalDate);
-            if (sysDate.compareTo(finalDate) > 0) {
-                break;
-            }
-            TimeUtil.sleepSeconds(15);
-        }
-    }
-
-    public static Date getMinutes(String expression, Calendar time) {
-        int hr;
-        int mins;
-        int day;
-        int month;
-        Calendar cal = Calendar.getInstance();
-        cal.setTime(time.getTime());
-        if (expression.contains("now")) {
-            hr = getInt(expression, 0);
-            mins = getInt(expression, 1);
-            cal.add(Calendar.HOUR, hr);
-            cal.add(Calendar.MINUTE, mins);
-        } else if (expression.contains("today")) {
-            hr = getInt(expression, 0);
-            mins = getInt(expression, 1);
-            cal.add(Calendar.HOUR, hr - (cal.get(Calendar.HOUR_OF_DAY)));
-            cal.add(Calendar.MINUTE, mins);
-        } else if (expression.contains("yesterday")) {
-            hr = getInt(expression, 0);
-            mins = getInt(expression, 1);
-            cal.add(Calendar.HOUR, hr - (cal.get(Calendar.HOUR_OF_DAY)) - 24);
-            cal.add(Calendar.MINUTE, mins);
-        } else if (expression.contains("currentMonth")) {
-            day = getInt(expression, 0);
-            hr = getInt(expression, 1);
-            mins = getInt(expression, 2);
-            cal.set(cal.get(Calendar.YEAR), cal.get(Calendar.MONTH), 1, 0, 0);
-            cal.add(Calendar.HOUR, 24 * day + hr);
-            cal.add(Calendar.MINUTE, mins);
-        } else if (expression.contains("lastMonth")) {
-            day = getInt(expression, 0);
-            hr = getInt(expression, 1);
-            mins = getInt(expression, 2);
-            cal.set(cal.get(Calendar.YEAR), cal.get(Calendar.MONTH) - 1, 1, 0, 0);
-            cal.add(Calendar.HOUR, 24 * day + hr);
-            cal.add(Calendar.MINUTE, mins);
-        } else if (expression.contains("currentYear")) {
-            month = getInt(expression, 0);
-            day = getInt(expression, 1);
-            hr = getInt(expression, 2);
-            mins = getInt(expression, 3);
-            cal.set(cal.get(Calendar.YEAR), 1, 1, 0, 0);
-            cal.add(Calendar.MONTH, month - 1);
-            cal.add(Calendar.HOUR, 24 * day + hr);
-            cal.add(Calendar.MINUTE, mins);
-        } else if (expression.contains("lastYear")) {
-            month = getInt(expression, 0);
-            day = getInt(expression, 1);
-            hr = getInt(expression, 2);
-            mins = getInt(expression, 3);
-            cal.set(cal.get(Calendar.YEAR) - 1, 1, 1, 0, 0);
-            cal.add(Calendar.MONTH, month - 1);
-            cal.add(Calendar.HOUR, 24 * day + hr);
-            cal.add(Calendar.MINUTE, mins);
-        }
-        return cal.getTime();
-    }
-
-    private static int getInt(String expression, int position) {
-        String numbers = expression.substring(expression.indexOf('(') + 1, expression.indexOf(')'));
-        return Integer.parseInt(numbers.split(",")[position]);
-    }
-
-    /**
-     * Converts given date from one format to another.
-     *
-     * @param date        input date
-     * @param myFormat    format the date should be converted to
-     * @param userFormat  format the input date is in
-     * @return date in myFormat
-     */
-    public static String parseDate(String date, String myFormat, String userFormat) throws ParseException {
-        SimpleDateFormat formatter = new SimpleDateFormat(myFormat);
-        SimpleDateFormat fromUser = new SimpleDateFormat(userFormat);
-        String reformattedStr="";
-        try {
-            reformattedStr = formatter.format(fromUser.parse(date));
-            LOGGER.info(reformattedStr);
-        } catch (ParseException e) {
-            e.printStackTrace();
-        }
-        return reformattedStr;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UIAssert.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UIAssert.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UIAssert.java
deleted file mode 100644
index c86fb4b..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UIAssert.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.log4j.Logger;
-import org.openqa.selenium.WebElement;
-import org.testng.Assert;
-
-/**
- * Assertions related to UI testing.
- */
-public final class UIAssert {
-    private UIAssert() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(UIAssert.class);
-
-    public static void assertDisplayed(WebElement element, String webElementName) {
-        LOGGER.info(String.format("Checking if WebElement '%s' is displayed", webElementName));
-        int timeoutSeconds = 2;
-        for (int i = 0; !element.isDisplayed() && i < timeoutSeconds * 10; i++) {
-            TimeUtil.sleepSeconds(0.1);
-        }
-        Assert.assertTrue(element.isDisplayed(),
-            String.format("WebElement '%s' should have been displayed", webElementName));
-        LOGGER.info(String.format("WebElement '%s' is displayed", webElementName));
-    }
-
-    public static void assertNotDisplayed(WebElement clusterForm, String webElementName) {
-        LOGGER.info(String.format("Checking if WebElement '%s' is displayed", webElementName));
-        Assert.assertFalse(clusterForm.isDisplayed(),
-            String.format("WebElement '%s' should NOT have been displayed", webElementName));
-        LOGGER.info(String.format("WebElement '%s' is not displayed", webElementName));
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UiUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UiUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UiUtil.java
deleted file mode 100644
index 6142332..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/UiUtil.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.log4j.Logger;
-import org.openqa.selenium.By;
-import org.openqa.selenium.JavascriptExecutor;
-import org.openqa.selenium.WebDriver;
-import org.openqa.selenium.WebElement;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Utility class for UI related tasks.
- */
-public final class UiUtil {
-    private UiUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(UiUtil.class);
-
-    /**
-     * Converts the element to a string representation. Useful for debugging/development.
-     * @param element element to be converted
-     * @param limitDepth the depth to traverse. Typically <=3 is a good value.
-     * @return string representation of the element
-     */
-    protected static String elementToString(WebElement element, Integer limitDepth) {
-        final StringBuilder retVal =
-            new StringBuilder("String representation of the element(first line is format):\n");
-        retVal.append("-> tagname")
-            .append("(id)")
-            .append("(classes)")
-            .append("[extra-info]")
-            .append("\t")
-            .append("text")
-            .append("\n");
-        retVal.append(elementToString("", element, limitDepth));
-        return retVal.toString();
-    }
-
-    private static StringBuilder elementToString(String prefix, WebElement element, Integer
-        limitDepth) {
-        if (limitDepth != null && limitDepth == 0) {
-            return new StringBuilder();
-        }
-        final Integer newDepth = limitDepth == null ? null : limitDepth - 1;
-        final StringBuilder elementStr = new StringBuilder(prefix);
-        List<String> extraInfo = new ArrayList<>();
-        if (StringUtils.isNotBlank(element.getAttribute("ng-repeat"))) {
-            extraInfo.add("array");
-        }
-        elementStr.append("-> ")
-            .append(element.getTagName())
-            .append("(").append(element.getAttribute("id")).append(")")
-            .append("(").append(element.getAttribute("class")).append(")")
-            .append(extraInfo)
-            .append("\t").append(StringEscapeUtils.escapeJava(element.getText()));
-        final String childPrefix = prefix + "\t";
-        final List<WebElement> childElements = element.findElements(By.xpath("./*"));
-        for (WebElement oneChildElement : childElements) {
-            StringBuilder childStr = elementToString(childPrefix, oneChildElement, newDepth);
-            if (childStr.length() > 0) {
-                elementStr.append("\n").append(childStr);
-            }
-        }
-        return elementStr;
-    }
-
-    /**
-     * Highlight the element in the UI. Useful for development/debugging.
-     * Copied from http://www.testingdiaries.com/highlight-element-using-selenium-webdriver/
-     * @param element the element to highlight
-     * @param driver the web driver in use
-     */
-    public static void elementHighlight(WebElement element, WebDriver driver) {
-        for (int i = 0; i < 2; i++) {
-            JavascriptExecutor js = (JavascriptExecutor) driver;
-            js.executeScript(
-                "arguments[0].setAttribute('style', arguments[1]);",
-                element, "color: red; border: 3px solid red;");
-            js.executeScript(
-                "arguments[0].setAttribute('style', arguments[1]);",
-                element, "");
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Util.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Util.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Util.java
deleted file mode 100644
index 452effa..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/Util.java
+++ /dev/null
@@ -1,607 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonParser;
-import com.jcraft.jsch.JSchException;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.regression.Entities.ClusterMerlin;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.supportClasses.JmsMessageConsumer;
-import org.apache.falcon.request.BaseRequest;
-import org.apache.falcon.request.RequestKeys;
-import org.apache.falcon.resource.APIResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.http.HttpResponse;
-import org.apache.log4j.Logger;
-import org.joda.time.DateTime;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.testng.Assert;
-import org.w3c.dom.Document;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
-
-import javax.jms.JMSException;
-import javax.jms.MapMessage;
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Source;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerConfigurationException;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.stream.StreamResult;
-import javax.xml.transform.stream.StreamSource;
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.List;
-
-/**
- * Util methods used across tests.
- */
-public final class Util {
-
-    private Util() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(Util.class);
-
-    /**
-     * Sends request without data and user.
-     */
-    public static ServiceResponse sendRequest(String url, String method)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return sendRequest(url, method, null, null);
-    }
-
-    /**
-     * Sends api request without data.
-     */
-    public static ServiceResponse sendRequest(String url, String method, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        return sendRequest(url, method, null, user);
-    }
-
-    /**
-     * Sends api requests.
-     * @param url target url
-     * @param method request method
-     * @param data data to be placed in the body of the request
-     * @param user user to be used to send request
-     * @return api response
-     * @throws IOException
-     * @throws URISyntaxException
-     * @throws AuthenticationException
-     */
-    public static ServiceResponse sendRequest(String url, String method, String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        BaseRequest request = new BaseRequest(url, method, user, data);
-        request.addHeader(RequestKeys.CONTENT_TYPE_HEADER, RequestKeys.XML_CONTENT_TYPE);
-        HttpResponse response = request.run();
-        return new ServiceResponse(response);
-    }
-
-    /**
-     * @param data string data
-     * @return whether data should be considered as XML
-     */
-    private static boolean isXML(String data) {
-        return data != null && data.trim().length() > 0 && data.trim().startsWith("<");
-    }
-
-    /**
-     * Converts service response to api result form.
-     * @param response service response
-     * @return api result
-     * @throws JAXBException
-     */
-    public static APIResult parseResponse(ServiceResponse response) throws JAXBException {
-        if (!isXML(response.getMessage())) {
-            return new APIResult(APIResult.Status.FAILED, response.getMessage());
-        }
-        JAXBContext jc = JAXBContext.newInstance(APIResult.class);
-        Unmarshaller u = jc.createUnmarshaller();
-        if (response.getMessage().contains("requestId")) {
-            return  (APIResult) u
-                .unmarshal(new InputSource(new StringReader(response.getMessage())));
-        } else {
-            return new APIResult(response.getCode() == 200
-                ? APIResult.Status.SUCCEEDED : APIResult.Status.FAILED, response.getMessage());
-        }
-    }
-
-    /**
-     * Lists all directories contained in a store by sub-path.
-     * @param helper cluster where store is present
-     * @param subPath sub-path
-     * @return list of all directories in the sub-path
-     * @throws IOException
-     * @throws JSchException
-     */
-    public static List<String> getStoreInfo(AbstractEntityHelper helper, String subPath)
-        throws IOException, JSchException {
-        if (helper.getStoreLocation().startsWith("hdfs:")) {
-            return HadoopUtil.getAllFilesHDFS(helper.getHadoopFS(),
-                new Path(helper.getStoreLocation() + subPath));
-        } else {
-            return ExecUtil.runRemoteScriptAsSudo(helper.getQaHost(), helper.getUsername(),
-                    helper.getPassword(), "ls " + helper.getStoreLocation()  + subPath,
-                    helper.getUsername(), helper.getIdentityFile());
-        }
-    }
-
-    /**
-     * @param data entity definition
-     * @return entity name
-     */
-    public static String readEntityName(String data) {
-        if (data.contains("uri:falcon:feed")) {
-            return new FeedMerlin(data).getName();
-        } else if (data.contains("uri:falcon:process")) {
-            return new ProcessMerlin(data).getName();
-        } else {
-            return new ClusterMerlin(data).getName();
-        }
-    }
-
-    /**
-     * Retrieves all hadoop data directories from a specific data path.
-     * @param fs filesystem
-     * @param feed feed definition
-     * @param dir specific directory
-     * @return relative paths of all data directories found under the given directory
-     * @throws IOException
-     */
-    public static List<String> getHadoopDataFromDir(FileSystem fs, String feed, String dir)
-        throws IOException {
-        List<String> finalResult = new ArrayList<>();
-        String feedPath = new FeedMerlin(feed).getFeedPath(LocationType.DATA);
-        int depth = feedPath.split(dir)[1].split("/").length - 1;
-        List<Path> results = HadoopUtil.getAllDirsRecursivelyHDFS(fs, new Path(dir), depth);
-        for (Path result : results) {
-            int pathDepth = result.toString().split(dir)[1].split("/").length - 1;
-            if (pathDepth == depth) {
-                finalResult.add(result.toString().split(dir)[1]);
-            }
-        }
-        return finalResult;
-    }
-
-    /**
-     * Finds first folder within a date range.
-     * @param startTime start date
-     * @param endTime end date
-     * @param folderList list of folders which are under analysis
-     * @return first matching folder, or null if none is present in the list
-     */
-    public static String findFolderBetweenGivenTimeStamps(DateTime startTime, DateTime endTime,
-                                                          List<String> folderList) {
-        DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd/HH/mm");
-        for (String folder : folderList) {
-            if (folder.compareTo(formatter.print(startTime)) >= 0
-                    &&
-                folder.compareTo(formatter.print(endTime)) <= 0) {
-                return folder;
-            }
-        }
-        return null;
-    }
-
-    public static List<String> getInstanceFinishTimes(ColoHelper coloHelper, String workflowId)
-        throws IOException, JSchException {
-        List<String> raw = ExecUtil.runRemoteScriptAsSudo(coloHelper.getProcessHelper()
-                .getQaHost(), coloHelper.getProcessHelper().getUsername(),
-            coloHelper.getProcessHelper().getPassword(),
-            "cat /var/log/falcon/application.* | grep \"" + workflowId + "\" | grep "
-                    + "\"Received\" | awk '{print $2}'",
-            coloHelper.getProcessHelper().getUsername(),
-            coloHelper.getProcessHelper().getIdentityFile()
-        );
-        List<String> finalList = new ArrayList<>();
-        for (String line : raw) {
-            finalList.add(line.split(",")[0]);
-        }
-        return finalList;
-    }
-
-    public static List<String> getInstanceRetryTimes(ColoHelper coloHelper, String workflowId)
-        throws IOException, JSchException {
-        List<String> raw = ExecUtil.runRemoteScriptAsSudo(coloHelper.getProcessHelper()
-                .getQaHost(), coloHelper.getProcessHelper().getUsername(),
-            coloHelper.getProcessHelper().getPassword(),
-            "cat /var/log/falcon/application.* | grep \"" + workflowId + "\" | grep "
-                    +
-                "\"Retrying attempt\" | awk '{print $2}'",
-            coloHelper.getProcessHelper().getUsername(),
-            coloHelper.getProcessHelper().getIdentityFile()
-        );
-        List<String> finalList = new ArrayList<>();
-        for (String line : raw) {
-            finalList.add(line.split(",")[0]);
-        }
-        return finalList;
-    }
-
-    /**
-     * Shuts down falcon server on a given host using sudo credentials.
-     * @param helper given host
-     * @throws IOException
-     * @throws JSchException
-     */
-    public static void shutDownService(AbstractEntityHelper helper)
-        throws IOException, JSchException {
-        ExecUtil.runRemoteScriptAsSudo(helper.getQaHost(), helper.getUsername(),
-            helper.getPassword(), helper.getServiceStopCmd(),
-            helper.getServiceUser(), helper.getIdentityFile());
-        TimeUtil.sleepSeconds(10);
-    }
-
-    /**
-     * Starts falcon server on a given host using sudo credentials and checks whether it succeeded.
-     * @param helper given host
-     * @throws IOException
-     * @throws JSchException
-     * @throws AuthenticationException
-     * @throws URISyntaxException
-     */
-    public static void startService(AbstractEntityHelper helper)
-        throws IOException, JSchException, AuthenticationException, URISyntaxException,
-            InterruptedException {
-        ExecUtil.runRemoteScriptAsSudo(helper.getQaHost(), helper.getUsername(),
-            helper.getPassword(), helper.getServiceStartCmd(), helper.getServiceUser(),
-            helper.getIdentityFile());
-        int statusCode = 0;
-        for (int tries = 20; tries > 0; tries--) {
-            try {
-                statusCode = Util.sendRequest(helper.getHostname(), "get").getCode();
-            } catch (IOException e) {
-                LOGGER.info(e.getMessage());
-            }
-            if (statusCode == 200) {
-                return;
-            }
-            TimeUtil.sleepSeconds(5);
-        }
-        throw new RuntimeException("Service on " + helper.getHostname() + " did not start!");
-    }
-
-    /**
-     * Stops and starts falcon service for a given host using sudo credentials.
-     * @param helper given host
-     * @throws IOException
-     * @throws JSchException
-     * @throws AuthenticationException
-     * @throws URISyntaxException
-     */
-    public static void restartService(AbstractEntityHelper helper)
-        throws IOException, JSchException, AuthenticationException, URISyntaxException,
-        InterruptedException {
-        LOGGER.info("restarting service for: " + helper.getQaHost());
-        shutDownService(helper);
-        startService(helper);
-    }
-
-    /**
-     * Prints the content of JMS consumer messages.
-     * @param messageConsumer the source JMSConsumer
-     * @throws JMSException
-     */
-    public static void printMessageData(JmsMessageConsumer messageConsumer) throws JMSException {
-        LOGGER.info("dumping all queue data:");
-        for (MapMessage mapMessage : messageConsumer.getReceivedMessages()) {
-            StringBuilder stringBuilder = new StringBuilder();
-            final Enumeration mapNames = mapMessage.getMapNames();
-            while (mapNames.hasMoreElements()) {
-                final String propName = mapNames.nextElement().toString();
-                final String propValue = mapMessage.getString(propName);
-                stringBuilder.append(propName).append('=').append(propValue).append(' ');
-            }
-            LOGGER.info(stringBuilder);
-        }
-    }
-
-    /**
-     * Get entity type according to its definition.
-     * @param entity entity which is under analysis
-     * @return entity type
-     */
-    public static EntityType getEntityType(String entity) {
-        if (entity.contains("uri:falcon:process:0.1")) {
-            return EntityType.PROCESS;
-        } else if (entity.contains("uri:falcon:cluster:0.1")) {
-            return EntityType.CLUSTER;
-        } else if (entity.contains("uri:falcon:feed:0.1")) {
-            return EntityType.FEED;
-        }
-        return null;
-    }
-
-    /**
-     * Compares two definitions.
-     * @param server1 server where 1st definition is stored
-     * @param server2 server where 2nd definition is stored
-     * @param entity entity which is under analysis
-     * @return whether the definitions are identical
-     */
-    public static boolean isDefinitionSame(ColoHelper server1, ColoHelper server2,
-                                           String entity)
-        throws URISyntaxException, IOException, AuthenticationException, JAXBException,
-        SAXException, InterruptedException {
-        return XmlUtil.isIdentical(getEntityDefinition(server1, entity, true),
-            getEntityDefinition(server2, entity, true));
-    }
-
-    /**
-     * Enum of api endpoint urls.
-     */
-    public enum URLS {
-        LIST_URL("/api/entities/list"),
-        SUBMIT_URL("/api/entities/submit"),
-        GET_ENTITY_DEFINITION("/api/entities/definition"),
-        DEPENDENCIES("/api/entities/dependencies"),
-        DELETE_URL("/api/entities/delete"),
-        SCHEDULE_URL("/api/entities/schedule"),
-        VALIDATE_URL("/api/entities/validate"),
-        SUSPEND_URL("/api/entities/suspend"),
-        RESUME_URL("/api/entities/resume"),
-        UPDATE("/api/entities/update"),
-        STATUS_URL("/api/entities/status"),
-        ENTITY_SUMMARY("/api/entities/summary"),
-        SUBMIT_AND_SCHEDULE_URL("/api/entities/submitAndSchedule"),
-        SLA("/api/entities/sla-alert"),
-        ENTITY_LINEAGE("/api/metadata/lineage/entities"),
-        INSTANCE_RUNNING("/api/instance/running"),
-        INSTANCE_STATUS("/api/instance/status"),
-        INSTANCE_KILL("/api/instance/kill"),
-        INSTANCE_RESUME("/api/instance/resume"),
-        INSTANCE_SUSPEND("/api/instance/suspend"),
-        INSTANCE_RERUN("/api/instance/rerun"),
-        INSTANCE_SUMMARY("/api/instance/summary"),
-        INSTANCE_PARAMS("/api/instance/params"),
-        INSTANCE_TRIAGE("/api/instance/triage"),
-        INSTANCE_LIST("/api/instance/list"),
-        INSTANCE_LISTING("/api/instance/listing"),
-        INSTANCE_LOGS("/api/instance/logs"),
-        INSTANCE_DEPENDENCIES("/api/instance/dependencies"),
-        TOUCH_URL("/api/entities/touch");
-
-        private final String url;
-
-        URLS(String url) {
-            this.url = url;
-        }
-
-        public String getValue() {
-            return this.url;
-        }
-    }
-
-    /**
-     * @param pathString whole path.
-     * @return path to basic data folder
-     */
-    public static String getPathPrefix(String pathString) {
-        return pathString.substring(0, pathString.indexOf('$'));
-    }
-
-    /**
-     * @param path whole path.
-     * @return file name which is retrieved from a path
-     */
-    public static String getFileNameFromPath(String path) {
-        return path.substring(path.lastIndexOf('/') + 1, path.length());
-    }
-
-    /**
-     * Defines request type according to request url.
-     * @param url request url
-     * @return request type
-     */
-    public static String getMethodType(String url) {
-        List<String> postList = new ArrayList<>();
-        postList.add("/entities/validate");
-        postList.add("/entities/submit");
-        postList.add("/entities/submitAndSchedule");
-        postList.add("/entities/suspend");
-        postList.add("/entities/resume");
-        postList.add("/instance/kill");
-        postList.add("/instance/suspend");
-        postList.add("/instance/resume");
-        postList.add("/instance/rerun");
-        for (String item : postList) {
-            if (url.toLowerCase().contains(item)) {
-                return "post";
-            }
-        }
-        List<String> deleteList = new ArrayList<>();
-        deleteList.add("/entities/delete");
-        for (String item : deleteList) {
-            if (url.toLowerCase().contains(item)) {
-                return "delete";
-            }
-        }
-        return "get";
-    }
-
-    /**
-     * Formats xml into a readable form.
-     * @param xmlString xmlString
-     * @return formatted xmlString
-     */
-    public static String prettyPrintXml(final String xmlString) {
-        if (xmlString == null) {
-            return null;
-        }
-        try {
-            Source xmlInput = new StreamSource(new StringReader(xmlString));
-            StringWriter stringWriter = new StringWriter();
-            StreamResult xmlOutput = new StreamResult(stringWriter);
-            TransformerFactory transformerFactory = TransformerFactory.newInstance();
-            transformerFactory.setAttribute("indent-number", "2");
-            Transformer transformer = transformerFactory.newTransformer();
-            transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-            transformer.transform(xmlInput, xmlOutput);
-            return xmlOutput.getWriter().toString();
-        } catch (TransformerConfigurationException e) {
-            return xmlString;
-        } catch (TransformerException e) {
-            return xmlString;
-        }
-    }
-
-    /**
-     * Converts json string to readable form.
-     * @param jsonString json string
-     * @return formatted string
-     */
-    public static String prettyPrintJson(final String jsonString) {
-        if (jsonString == null) {
-            return null;
-        }
-        Gson gson = new GsonBuilder().setPrettyPrinting().create();
-        JsonElement json = new JsonParser().parse(jsonString);
-        return gson.toJson(json);
-    }
-
-    /**
-     * Formats xml or json into a pretty and readable form.
-     * @param str xml or json string
-     * @return converted xml or json
-     */
-    public static String prettyPrintXmlOrJson(final String str) {
-        if (str == null) {
-            return null;
-        }
-        String cleanStr = str.trim();
-        //taken from http://stackoverflow.com/questions/7256142/way-to-quickly-check-if-string-is-xml-or-json-in-c-sharp
-        if (cleanStr.startsWith("{") || cleanStr.startsWith("[")) {
-            return prettyPrintJson(cleanStr);
-        }
-        if (cleanStr.startsWith("<")) {
-            return prettyPrintXml(cleanStr);
-        }
-        LOGGER.warn("The string does not seem to be either json or xml: " + cleanStr);
-        return str;
-    }
-
-    /**
-     * Tries to get entity definition.
-     * @param cluster cluster where definition is stored
-     * @param entity entity for which definition is required
-     * @param shouldReturn should the definition be successfully retrieved or not
-     * @return entity definition
-     */
-    public static String getEntityDefinition(ColoHelper cluster,
-                                             String entity,
-                                             boolean shouldReturn) throws
-            JAXBException,
-            IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        EntityType type = getEntityType(entity);
-        AbstractEntityHelper helper;
-        if (EntityType.PROCESS == type) {
-            helper = cluster.getProcessHelper();
-        } else if (EntityType.FEED == type) {
-            helper = cluster.getFeedHelper();
-        } else {
-            helper = cluster.getClusterHelper();
-        }
-        ServiceResponse response = helper.getEntityDefinition(entity);
-        if (shouldReturn) {
-            AssertUtil.assertSucceeded(response);
-        } else {
-            AssertUtil.assertFailed(response);
-        }
-        String result = response.getMessage();
-        Assert.assertNotNull(result);
-        return result;
-    }
-
-    /**
-     * Get prefix for test entities.
-     * @param testClass object of test class
-     * @return the test class name if IS_DEPRECATE is false, or 'A' plus a hash of the class name if it is true
-     */
-    public static String getEntityPrefix(Object testClass) {
-        String className = testClass.getClass().getSimpleName();
-        if (MerlinConstants.IS_DEPRECATE) {
-            return 'A' + Integer.toHexString(className.hashCode());
-        } else {
-            return className;
-        }
-    }
-
-    /**
-     * Converts string to xml document.
-     * @param xmlStr string representation
-     * @return document representation.
-     */
-    public static Document convertStringToDocument(String xmlStr) {
-        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
-        DocumentBuilder builder;
-        try {
-            builder = factory.newDocumentBuilder();
-            Document doc = builder.parse(new InputSource(new StringReader(xmlStr)));
-            return doc;
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-        return null;
-    }
-
-    /**
-     * Sends api requests.
-     * @param url target url
-     * @param method request method
-     * @param data data to be placed in the body of the request
-     * @param user user to be used to send request
-     * @return api response
-     * @throws IOException
-     * @throws URISyntaxException
-     * @throws AuthenticationException
-     */
-    public static ServiceResponse sendJSONRequest(String url, String method, String data, String user)
-        throws IOException, URISyntaxException, AuthenticationException, InterruptedException {
-        BaseRequest request = new BaseRequest(url, method, user, data);
-        request.addHeader(RequestKeys.CONTENT_TYPE_HEADER, RequestKeys.JSON_CONTENT_TYPE);
-        HttpResponse response = request.run();
-        return new ServiceResponse(response);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/XmlUtil.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/XmlUtil.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/XmlUtil.java
deleted file mode 100644
index 1041910..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/core/util/XmlUtil.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.core.util;
-
-import org.custommonkey.xmlunit.Diff;
-import org.custommonkey.xmlunit.XMLUnit;
-import org.apache.log4j.Logger;
-import org.xml.sax.SAXException;
-
-import java.io.IOException;
-
-/**
- * Util methods for XML.
- */
-public final class XmlUtil {
-
-    private XmlUtil() {
-        throw new AssertionError("Instantiating utility class...");
-    }
-    private static final Logger LOGGER = Logger.getLogger(XmlUtil.class);
-
-    public static boolean isIdentical(String expected, String actual)
-        throws IOException, SAXException {
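-        // Ignore whitespace and attribute order so that logically equivalent XML documents compare as identical.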
-        XMLUnit.setIgnoreWhitespace(true);
-        XMLUnit.setIgnoreAttributeOrder(true);
-        Diff diff = XMLUnit.compareXML(expected, actual);
-        LOGGER.info(diff);
-        return diff.identical();
-    }
-}


[03/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceKillsTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceKillsTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceKillsTest.java
deleted file mode 100644
index 48c8021..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceKillsTest.java
+++ /dev/null
@@ -1,369 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesResult.WorkflowStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.CoordinatorJob;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.Arrays;
-import java.util.List;
-
-/**
- * Process instance kill tests.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceKillsTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(ProcessInstanceKillsTest.class);
-    private String processName;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        processName = bundles[0].getProcessName();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedule process. Perform -kill action for only one instance. Check that action
-     * succeeded and only one instance was killed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillSingle() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:01Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.KILLED);
-    }
-
-    /**
-     * Schedule process. Check that when the -start and -end parameters are equal, zero
-     * instances are killed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillStartAndEndSame() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T00:00Z", "2010-01-02T04:00Z");
-        bundles[0].setProcessConcurrency(2);
-        bundles[0].setProcessTimeOut(3, TimeUnit.minutes);
-        bundles[0].setProcessPeriodicity(1, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(10);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 2,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper()
-                .getProcessInstanceKill(processName, "?start=2010-01-02T00:03Z&end=2010-01-02T00:03Z");
-        Assert.assertNull(r.getInstances(), "There should be zero instances killed");
-    }
-
-    /**
-     * Schedule process. Provide data for all instances except the last one,
-     * leaving it non-materialized (waiting). Try to -kill the last 3 instances.
-     * Check that only the running instances were affected.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillKillNotRunning() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T00:00Z", "2010-01-02T00:26Z");
-        bundles[0].setProcessTimeOut(3, TimeUnit.minutes);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setInputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-
-        //create data for first 5 instances, 6th should be non-materialized
-        String bundleId = OozieUtil.getSequenceBundleID(clusterOC, processName,
-            EntityType.PROCESS, 0);
-        for(CoordinatorJob c : clusterOC.getBundleJobInfo(bundleId).getCoordinators()) {
-            List<CoordinatorAction> actions = clusterOC.getCoordJobInfo(c.getId()).getActions();
-            if (actions.size() == 6) {
-                for(int i = 0; i < 5; i++) {
-                    CoordinatorAction action = actions.get(i);
-                    HadoopUtil.createHDFSFolders(cluster, Arrays
-                        .asList(action.getMissingDependencies().split("#")));
-                }
-                break;
-            }
-        }
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 3);
-        InstancesResult r = prism.getProcessHelper()
-                .getProcessInstanceKill(processName, "?start=2010-01-02T00:14Z&end=2010-01-02T00:26Z");
-        InstanceUtil.validateResponse(r, 3, 0, 0, 1, 2);
-        LOGGER.info(r.toString());
-    }
-
-    /**
-     * Generate data. Schedule process. Try to perform the -kill
-     * operation using -start and -end values which are both in the future with respect to the process start.
-     *
-     * @throws Exception TODO amend test with validations
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillBothStartAndEndInFuture01() throws Exception {
-        /*
-        both start and end are in the future with respect to the process start and end
-         */
-        String startTime = TimeUtil.getTimeWrtSystemTime(-20);
-        String endTime = TimeUtil.getTimeWrtSystemTime(400);
-        String startTimeData = TimeUtil.getTimeWrtSystemTime(-50);
-        String endTimeData = TimeUtil.getTimeWrtSystemTime(50);
-
-        bundles[0].setInputFeedDataPath(feedInputPath.replace("input/", "input01/"));
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        String startTimeRequest = TimeUtil.getTimeWrtSystemTime(-17);
-        String endTimeRequest = TimeUtil.getTimeWrtSystemTime(23);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?start=" + startTimeRequest + "&end=" + endTimeRequest);
-        LOGGER.info(r.toString());
-    }
-
-    /**
-     * Schedule process. Check that the -kill action is not performed when the time range between -start
-     * and -end parameters is in the future and doesn't include existing instances.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillBothStartAndEndInFuture() throws Exception {
-        /*
-         both start and end are in the future with respect to the current time
-          */
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2099-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
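-        // Build a kill range that lies entirely in the future relative to the current time.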
-        String startTime = TimeUtil.getTimeWrtSystemTime(1);
-        String endTime = TimeUtil.getTimeWrtSystemTime(40);
-        InstancesResult r = prism.getProcessHelper()
-                .getProcessInstanceKill(processName, "?start=" + startTime + "&end=" + endTime);
-        LOGGER.info(r.getMessage());
-        Assert.assertEquals(r.getInstances(), null);
-    }
-
-    /**
-     * Schedule process. Perform -kill action within a time range which includes 3 running instances.
-     * Get the status of instances within a wider range. Check that only those 3 instances are
-     * killed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillMultipleInstance() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        prism.getProcessHelper()
-                .getProcessInstanceKill(processName, "?start=2010-01-02T01:05Z&end=2010-01-02T01:16Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(r, 5, 2, 0, 0, 3);
-    }
-
-    /**
-     * Schedule process. Perform -kill action on the last expected instance. Get the status of instances
-     * within a wider range. Check that only the last one is killed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillLastInstance() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 10);
-        prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?start=2010-01-02T01:20Z&end=2010-01-02T01:21Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(r, 5, 4, 0, 0, 1);
-    }
-
-    /**
-     * Schedule process. Suspend one running instance. Perform -kill action on it. Check that
-     * the instance is indeed killed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillSuspended() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:04Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:04Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.KILLED);
-    }
-
-    /**
-     * Schedule a single-instance process. Wait till it finishes. Try to kill the instance. Check
-     * that the instance remains in SUCCEEDED state.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillSucceeded() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(0), processName, 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:04Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.SUCCEEDED);
-    }
-
-    /**
-     * Schedule process. Perform -kill action using only the -start parameter. Check that the
-     * request is rejected with an unparseable date error.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillWOEndParam() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?start=2010-01-02T01:00Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process. Perform -kill action using only the -end parameter. Check that the
-     * request is rejected with an unparseable date error.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillWOStartParam() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                "?end=2010-01-02T01:01Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process. Perform -kill action without the start or end params. Check that the
-     * request is rejected with an unparseable date error.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceKillWOParams() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                null);
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRerunTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRerunTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRerunTest.java
deleted file mode 100644
index d5b4ef2..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRerunTest.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.entity.v0.process.Properties;
-import org.apache.falcon.entity.v0.process.Property;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.WorkflowJob.Status;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * Test Suite for instance rerun.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceRerunTest extends BaseTestClass {
-
-    private boolean restartRequired;
-    private String baseTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private String feedInputPath = baseTestDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private String feedInputTimedOutPath = baseTestDir + "/timedout" + MINUTE_DATE_PATTERN;
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private static final Logger LOGGER = Logger.getLogger(ProcessInstanceRerunTest.class);
-    private static final double TIMEOUT = 10;
-    private String processName;
-    private String start = "?start=2010-01-02T01:00Z";
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        processName = bundles[0].getProcessName();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedule process. Kill some instances. Rerun some of the killed ones. Check that
-     * those instances were rerun correctly and the others are still killed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunSomeKilled02() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:26Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-            start + "&end=2010-01-02T01:16Z");
-        InstanceUtil.validateResponse(r, 4, 0, 0, 0, 4);
-        List<String> wfIDs = InstanceUtil.getWorkflows(clusterOC, processName);
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-            start + "&end=2010-01-02T01:11Z");
-        InstanceUtil.areWorkflowsRunning(clusterOC, wfIDs, 6, 5, 1, 0);
-    }
-
-    /**
-     * Schedule process. Kill some instances. Rerun some of the killed ones without using -start or
-     * -end parameters. Should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunKilledWOParams() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:26Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                start + "&end=2010-01-02T01:16Z");
-        InstanceUtil.validateResponse(r, 4, 0, 0, 0, 4);
-        r = prism.getProcessHelper().getProcessInstanceRerun(processName,
-                null);
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process. Kill some instances. Rerun some of the killed ones using only the
-     * -end parameter. Should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunKilledWOStartParam() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:26Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-            start + "&end=2010-01-02T01:16Z");
-        InstanceUtil.validateResponse(r, 4, 0, 0, 0, 4);
-        r = prism.getProcessHelper().getProcessInstanceRerun(processName,
-                "?end=2010-01-02T01:11Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process. Kill some instances. Rerun some of the killed ones using only the
-     * -start parameter. Should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunKilledWOEndParam() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:26Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceKill(processName,
-                start + "&end=2010-01-02T01:16Z");
-        InstanceUtil.validateResponse(r, 4, 0, 0, 0, 4);
-        r = prism.getProcessHelper().getProcessInstanceRerun(processName,
-                start);
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process. Kill all instances. Rerun them. Check that they were rerun.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunMultipleKilled() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:11Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(5);
-        String process = bundles[0].getProcessData();
-        LOGGER.info("process: " + Util.prettyPrintXml(process));
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper()
-            .getProcessInstanceKill(processName, start + "&end=2010-01-02T01:11Z");
-        InstanceUtil.validateResponse(r, 3, 0, 0, 0, 3);
-        List<String> wfIDs =  InstanceUtil.getWorkflows(clusterOC, processName);
-        prism.getProcessHelper().
-            getProcessInstanceRerun(processName, start + "&end=2010-01-02T01:11Z");
-        InstanceUtil.areWorkflowsRunning(clusterOC, wfIDs, 3, 3, 0, 0);
-    }
-
-    /**
-     * Schedule process. Kill some instances. Rerun them. Check that there are no killed
-     * instances left.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunSomeKilled01() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:26Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 6,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper()
-            .getProcessInstanceKill(processName, start + "&end=2010-01-02T01:11Z");
-        InstanceUtil.validateResponse(r, 3, 0, 0, 0, 3);
-        List<String> wfIDs = InstanceUtil.getWorkflows(clusterOC, processName);
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-            start + "&end=2010-01-02T01:11Z");
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstanceUtil.areWorkflowsRunning(clusterOC, wfIDs, 6, 6, 0, 0);
-    }
-
-    /**
-     * Schedule process. Kill single instance. Rerun it. Check it was rerun.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunSingleKilled() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        prism.getProcessHelper().getProcessInstanceKill(processName,
-            start + "&end=2010-01-02T01:01Z");
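-        // Remember the workflow id of the killed instance so we can verify it runs again after the rerun.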
-        String wfID = InstanceUtil.getWorkflows(clusterOC, processName, Status.KILLED).get(0);
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-            start + "&end=2010-01-02T01:01Z");
-        Assert.assertTrue(InstanceUtil.isWorkflowRunning(clusterOC, wfID));
-    }
-
-    /**
-     * Schedule process. Wait till it succeeds. Rerun the first succeeded instance. Check that it
-     * is running.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunSingleSucceeded() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String process = bundles[0].getProcessData();
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, process, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        String wfID = InstanceUtil.getWorkflows(clusterOC, processName, Status.RUNNING,
-            Status.SUCCEEDED).get(0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 0, CoordinatorAction
-            .Status.SUCCEEDED, EntityType.PROCESS);
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-            start + "&end=2010-01-02T01:01Z&force=true");
-        Assert.assertTrue(InstanceUtil.isWorkflowRunning(clusterOC, wfID));
-    }
-
-    /**
-     * Schedule process. Suspend its instances. Try to rerun them. Check that instances weren't
-     * rerun and are still suspended.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunSingleSuspended() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:06Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(2);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 2,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            start + "&end=2010-01-02T01:06Z");
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-            start + "&end=2010-01-02T01:06Z");
-        Assert.assertEquals(InstanceUtil.getInstanceStatus(clusterOC, processName, 0, 1),
-            CoordinatorAction.Status.SUSPENDED);
-    }
-
-    /**
-     * Schedule process. Wait till its instances succeed. Rerun them all. Check they are running.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunMultipleSucceeded() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:08Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(2);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 2,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-        List<String> wfIDs = InstanceUtil.getWorkflows(clusterOC, processName);
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-                start + "&end=2010-01-02T01:11Z&force=true");
-        InstanceUtil.areWorkflowsRunning(clusterOC, wfIDs, 2, 2, 0, 0);
-    }
-
-    /**
-     * Schedule process with an invalid input feed data path. Wait till the process times out. Rerun
-     * its instances. Check that they were rerun and are waiting for input data.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceRerunTimedOut() throws Exception {
-        bundles[0].setInputFeedDataPath(feedInputTimedOutPath);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:11Z");
-        bundles[0].setProcessTimeOut(2, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        CoordinatorAction.Status s;
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.TIMEDOUT, EntityType.PROCESS);
-        prism.getProcessHelper().getProcessInstanceRerun(processName,
-            start + "&end=2010-01-02T01:11Z");
-        s = InstanceUtil.getInstanceStatus(clusterOC, processName, 0, 0);
-        Assert.assertEquals(s, CoordinatorAction.Status.WAITING,
-                "instance should have been in WAITING state");
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 1200000)
-    public void testProcessInstanceRerunFailedPostProcessing() throws Exception {
-        restartRequired = true;
-        bundles[0].setProcessValidity("2015-01-02T01:00Z", "2015-01-02T01:04Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-
-        //bring down Server1 colo
-        Util.shutDownService(cluster.getClusterHelper());
-
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-
-        //wait for the instance to reach KILLED state
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-            CoordinatorAction.Status.KILLED, EntityType.PROCESS, 5);
-
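-        // With the colo down, the post-processing action should end up KILLED even though the user action succeeded.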
-        Assert.assertEquals(OozieUtil.getWorkflowActionStatus(clusterOC, bundleId, "post-processing")
-            .contains("KILLED"), true);
-        Assert.assertEquals(OozieUtil.getWorkflowActionStatus(clusterOC, bundleId, "user-action")
-            .contains("SUCCEEDED"), true);
-
-        //start Server1 colo
-        Util.startService(cluster.getClusterHelper());
-        TimeUtil.sleepSeconds(10);
-
-        prism.getProcessHelper().getProcessInstanceRerun(processName, "?start=2015-01-02T01:00Z&end=2015-01-02T01:04Z");
-
-        while (!OozieUtil.getWorkflowActionStatus(clusterOC, bundleId, "post-processing").contains("SUCCEEDED")) {
-            TimeUtil.sleepSeconds(10);
-        }
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 1200000)
-    public void testProcessInstanceRerunFailedWorkflowAction() throws Exception {
-
-        // Defining path to be used in pig script
-        String propPath = cleanAndGetTestDir() + "/rerun";
-        org.apache.falcon.entity.v0.process.Process processElement = bundles[0].getProcessObject();
-        Properties properties = new Properties();
-        Property propertyInput = new Property();
-        propertyInput.setName("inputPath");
-        propertyInput.setValue(propPath);
-
-        Property propertyOutput = new Property();
-        propertyOutput.setName("outputPath");
-        propertyOutput.setValue(propPath + "/output");
-        properties.getProperties().add(propertyInput);
-        properties.getProperties().add(propertyOutput);
-        processElement.setProperties(properties);
-        bundles[0].setProcessData(processElement.toString());
-
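-        // Upload a multi-action workflow plus the pig script; the pig action's inputPath does not exist yet, so it is expected to fail on the first run.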
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.MULTIPLE_ACTION_WORKFLOW);
-        HadoopUtil.copyDataToFolder(clusterFS, aggregateWorkflowDir, OSUtil.concat(OSUtil.PIG_DIR, "id.pig"));
-
-        bundles[0].setProcessValidity("2015-01-02T01:00Z", "2015-01-02T01:04Z");
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-
-        //wait for instance to get killed
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.KILLED, EntityType.PROCESS, 5);
-
-        Assert.assertEquals(OozieUtil.getWorkflowActionStatus(clusterOC, bundleId, "user-action")
-                .contains("KILLED"), true);
-        Assert.assertEquals(OozieUtil.getSubWorkflowActionStatus(clusterOC, bundleId, "user-action", "pig")
-                .contains("KILLED"), true);
-        Assert.assertEquals(OozieUtil.getSubWorkflowActionStatus(clusterOC, bundleId, "user-action", "aggregator")
-                .contains("SUCCEEDED"), true);
-
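-        // Provide the missing pig input data so the previously failed action can succeed on rerun.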
-        HadoopUtil.uploadDir(clusterFS, propPath, OSUtil.MULTIPLE_ACTION_WORKFLOW);
-
-        prism.getProcessHelper().getProcessInstanceRerun(processName, "?start=2015-01-02T01:00Z&end=2015-01-02T01:04Z");
-
-        while (!OozieUtil.getSubWorkflowActionStatus(clusterOC, bundleId, "user-action", "pig").contains("SUCCEEDED")) {
-            TimeUtil.sleepSeconds(10);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceResumeTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceResumeTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceResumeTest.java
deleted file mode 100644
index b7f4428..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceResumeTest.java
+++ /dev/null
@@ -1,293 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Process instance resume tests.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceResumeTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private static final Logger LOGGER = Logger.getLogger(ProcessInstanceResumeTest.class);
-    private String processName;
-    private String wholeRange = "?start=2010-01-02T01:00Z&end=2010-01-02T01:26Z";
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(6);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:26Z");
-        processName = bundles[0].getProcessName();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedule process. Suspend some instances. Attempt to -resume instances using a -start/-end
-     * range. Instances up to the end date (exclusive) should be resumed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeOnlyEnd() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 6,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:05Z&end=2010-01-02T01:21Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            wholeRange);
-        InstanceUtil.validateResponse(r, 6, 2, 4, 0, 0);
-        r = prism.getProcessHelper().getProcessInstanceResume(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:15Z");
-        InstanceUtil.validateResponse(r, 3, 3, 0, 0, 0);
-    }
-
-    /**
-     * Schedule process. Suspend some instances. Try to perform -resume using a time range which
-     * affects only one instance. Check that this instance was resumed as expected.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeResumeSome() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 6,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:05Z&end=2010-01-02T01:21Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            wholeRange);
-        InstanceUtil.validateResponse(r, 6, 2, 4, 0, 0);
-        prism.getProcessHelper().getProcessInstanceResume(processName,
-            "?start=2010-01-02T01:05Z&end=2010-01-02T01:16Z");
-        r = prism.getProcessHelper().getProcessInstanceStatus(processName, wholeRange);
-        InstanceUtil.validateResponse(r, 6, 5, 1, 0, 0);
-    }
-
-    /**
-     * Schedule process. Suspend some instances. Try to perform -resume using a time range which
-     * affects all instances. Check that there are no suspended instances.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeResumeMany() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 6,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        String withinRange = "?start=2010-01-02T01:05Z&end=2010-01-02T01:21Z";
-        prism.getProcessHelper().getProcessInstanceSuspend(processName, withinRange);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            wholeRange);
-        InstanceUtil.validateResponse(r, 6, 2, 4, 0, 0);
-        prism.getProcessHelper().getProcessInstanceResume(processName, withinRange);
-        r = prism.getProcessHelper().getProcessInstanceStatus(processName, wholeRange);
-        InstanceUtil.validateResponse(r, 6, 6, 0, 0, 0);
-    }
-
-    /**
-     * Schedule process. Suspend the first instance. Resume it using a range covering the whole validity.
-     * Check that the instance was resumed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeSingle() throws Exception {
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 2);
-        String param = "?start=2010-01-02T01:00Z&end=2010-01-02T01:26Z";
-        prism.getProcessHelper().getProcessInstanceSuspend(processName, param);
-        prism.getProcessHelper().getProcessInstanceResume(processName, param);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName, param);
-        InstanceUtil.validateResponse(r, 6, 1, 0, 5, 0);
-    }
-
-    /**
-     * Attempt to resume instances of non-existent process should fail with an appropriate
-     * status code.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeNonExistent() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceResume("invalidName",
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:15Z");
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * An attempt to perform the -resume action without time range parameters should fail with an
-     * appropriate status code or message. Will fail now due to jira: https://issues.apache.org/jira/browse/FALCON-710
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeNoParams() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:15Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceResume(processName, null);
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * An attempt to perform the -resume action without the -end parameter should fail with an
-     * appropriate status code or message.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeWOEndParam() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:15Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceResume(processName, "?start=2010-01-02T01:00Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * An attempt to perform the -resume action without the -start parameter should fail with an
-     * appropriate status code or message.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeWOStartParam() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:15Z");
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceResume(processName, "?end=2010-01-02T01:15Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule a process, then remove it. Try to -resume its instances. The attempt should fail with
-     * an appropriate status code.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeDeleted() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        prism.getProcessHelper().delete(bundles[0].getProcessData());
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceResume(processName,
-            "?start=2010-01-02T01:05Z");
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * Schedule process. Try to resume an entity which wasn't suspended. Check that instances remain running.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeNonSuspended() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 6,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        String start = "?start=2010-01-02T01:05Z&end=2010-01-02T01:26Z";
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName, start);
-        InstanceUtil.validateResponse(r, 5, 5, 0, 0, 0);
-        r = prism.getProcessHelper().getProcessInstanceResume(processName, start);
-        InstanceUtil.validateResponse(r, 5, 5, 0, 0, 0);
-    }
-
-    /**
-     * Schedule process. Suspend last instance. Resume it using parameter which points to
-     * expected materialization time of last instance. Check that there are no suspended
-     * instances among all which belong to current process.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceResumeLastInstance() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 6,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        String last = "?start=2010-01-02T01:25Z&end=2010-01-02T01:26Z";
-        prism.getProcessHelper().getProcessInstanceSuspend(processName, last);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            wholeRange);
-        InstanceUtil.validateResponse(r, 6, 5, 1, 0, 0);
-        prism.getProcessHelper().getProcessInstanceResume(processName, last);
-        r = prism.getProcessHelper().getProcessInstanceStatus(processName, wholeRange);
-        InstanceUtil.validateResponse(r, 6, 6, 0, 0, 0);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRunningTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRunningTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRunningTest.java
deleted file mode 100644
index 98fdcca..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceRunningTest.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesResult.WorkflowStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Regression for instance running api.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceRunningTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(ProcessInstanceRunningTest.class);
-    private static final double TIMEOUT = 15;
-    private String processName;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:11Z");
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        processName = bundles[0].getProcessName();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Run process. Suspend it and then resume. Get all -running instances. Response should
-     * contain all process instances.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getResumedProcessInstance() throws Exception {
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        String process = bundles[0].getProcessData();
-        AssertUtil.assertSucceeded(prism.getProcessHelper().suspend(process));
-        TimeUtil.sleepSeconds(TIMEOUT);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().resume(process));
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-    }
-
-    /**
-     * Run process. Suspend it. Try to get -running instances. Response should be
-     * successful but shouldn't contain any instance.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getSuspendedProcessInstance() throws Exception {
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().suspend(bundles[0].getProcessData()));
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccessWOInstances(r);
-    }
-
-    /**
-     * Run process. Get -running instances. Check that response contains expected number of
-     * instances.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getRunningProcessInstance() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-    }
-
-    /**
-     * Attempt to get -running instances of nonexistent process should result in error.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getNonExistenceProcessInstance() throws Exception {
-        InstancesResult r = prism.getProcessHelper().getRunningInstance("invalidName");
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * Attempt to get -running instances of deleted process should result in error.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getKilledProcessInstance() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        prism.getProcessHelper().delete(bundles[0].getProcessData());
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * Launch process and wait till it succeeds. Try to get -running instances. Response
-     * should reflect success but shouldn't contain any instances.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getSucceededProcessInstance() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        OozieUtil.waitForBundleToReachState(clusterOC, processName, Job.Status.SUCCEEDED);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccessWOInstances(r);
-    }
-}


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
deleted file mode 100644
index 9b1e1f4..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
+++ /dev/null
@@ -1,522 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.json.simple.JSONValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TimeZone;
-
-
-/**
- * Captures the workflow execution context.
- */
-public class WorkflowExecutionContext {
-
-    private static final Logger LOG = LoggerFactory.getLogger(WorkflowExecutionContext.class);
-
-    public static final String INSTANCE_FORMAT = "yyyy-MM-dd-HH-mm"; // nominal time
-
-    public static final String OUTPUT_FEED_SEPARATOR = ",";
-    public static final String INPUT_FEED_SEPARATOR = "#";
-    public static final String CLUSTER_NAME_SEPARATOR = ",";
-
-    /**
-     * Workflow execution status.
-     */
-    public enum Status {WAITING, RUNNING, SUSPENDED, SUCCEEDED, FAILED, TIMEDOUT, KILLED}
-
-    /**
-     * Workflow execution type.
-     */
-    public enum Type {PRE_PROCESSING, POST_PROCESSING, WORKFLOW_JOB, COORDINATOR_ACTION}
-
-    /**
-     * Entity operations supported.
-     */
-    public enum EntityOperations {
-        GENERATE, DELETE, REPLICATE, IMPORT, EXPORT
-    }
-
-    public static final WorkflowExecutionArgs[] USER_MESSAGE_ARGS = {
-        WorkflowExecutionArgs.CLUSTER_NAME,
-        WorkflowExecutionArgs.ENTITY_NAME,
-        WorkflowExecutionArgs.ENTITY_TYPE,
-        WorkflowExecutionArgs.NOMINAL_TIME,
-        WorkflowExecutionArgs.OPERATION,
-
-        WorkflowExecutionArgs.OUTPUT_FEED_NAMES,
-        WorkflowExecutionArgs.OUTPUT_FEED_PATHS,
-
-        WorkflowExecutionArgs.WORKFLOW_ID,
-        WorkflowExecutionArgs.WORKFLOW_USER,
-        WorkflowExecutionArgs.RUN_ID,
-        WorkflowExecutionArgs.STATUS,
-        WorkflowExecutionArgs.TIMESTAMP,
-        WorkflowExecutionArgs.LOG_DIR,
-    };
-
-    private final Map<WorkflowExecutionArgs, String> context;
-    private final long creationTime;
-    private Configuration actionJobConf;
-
-    public WorkflowExecutionContext(Map<WorkflowExecutionArgs, String> context) {
-        this.context = context;
-        creationTime = System.currentTimeMillis();
-    }
-
-    public String getValue(WorkflowExecutionArgs arg) {
-        return context.get(arg);
-    }
-
-    public void setValue(WorkflowExecutionArgs arg, String value) {
-        context.put(arg, value);
-    }
-
-    public String getValue(WorkflowExecutionArgs arg, String defaultValue) {
-        return context.containsKey(arg) ? context.get(arg) : defaultValue;
-    }
-
-    public boolean containsKey(WorkflowExecutionArgs arg) {
-        return context.containsKey(arg);
-    }
-
-    public Set<Map.Entry<WorkflowExecutionArgs, String>> entrySet() {
-        return context.entrySet();
-    }
-
-    // helper methods
-    public boolean hasWorkflowSucceeded() {
-        return Status.SUCCEEDED.name().equals(getValue(WorkflowExecutionArgs.STATUS));
-    }
-
-    public boolean hasWorkflowFailed() {
-        return Status.FAILED.name().equals(getValue(WorkflowExecutionArgs.STATUS));
-    }
-
-    public boolean isWorkflowKilledManually(){
-        try {
-            return WorkflowEngineFactory.getWorkflowEngine().
-                    isWorkflowKilledByUser(
-                            getValue(WorkflowExecutionArgs.CLUSTER_NAME),
-                            getValue(WorkflowExecutionArgs.WORKFLOW_ID));
-        } catch (Exception e) {
-            LOG.error("Got Error in getting error codes from actions: " + e);
-        }
-        return false;
-    }
-
-    public boolean hasWorkflowTimedOut() {
-        return Status.TIMEDOUT.name().equals(getValue(WorkflowExecutionArgs.STATUS));
-    }
-
-    public boolean hasWorkflowBeenKilled() {
-        return Status.KILLED.name().equals(getValue(WorkflowExecutionArgs.STATUS));
-    }
-
-    public String getContextFile() {
-        return getValue(WorkflowExecutionArgs.CONTEXT_FILE);
-    }
-
-    public Status getWorkflowStatus() {
-        return Status.valueOf(getValue(WorkflowExecutionArgs.STATUS));
-    }
-
-    public String getLogDir() {
-        return getValue(WorkflowExecutionArgs.LOG_DIR);
-    }
-
-    public String getLogFile() {
-        return getValue(WorkflowExecutionArgs.LOG_FILE);
-    }
-
-    String getNominalTime() {
-        return getValue(WorkflowExecutionArgs.NOMINAL_TIME);
-    }
-
-    /**
-     * Returns nominal time as an ISO8601 formatted string.
-     * @return an ISO8601 formatted string
-     */
-    public String getNominalTimeAsISO8601() {
-        return SchemaHelper.formatDateUTCToISO8601(getNominalTime(), INSTANCE_FORMAT);
-    }
-
-    String getTimestamp() {
-        return getValue(WorkflowExecutionArgs.TIMESTAMP);
-    }
-
-    /**
-     * Returns timestamp as a long.
-     * @return Date as long (milliseconds since epoch) for the timestamp.
-     */
-    public long getTimeStampAsLong() {
-        String dateString = getTimestamp();
-        try {
-            DateFormat dateFormat = new SimpleDateFormat(INSTANCE_FORMAT.substring(0, dateString.length()));
-            dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
-            return dateFormat.parse(dateString).getTime();
-        } catch (java.text.ParseException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /**
-     * Returns timestamp as an ISO8601 formatted string.
-     * @return an ISO8601 formatted string
-     */
-    public String getTimeStampAsISO8601() {
-        return SchemaHelper.formatDateUTCToISO8601(getTimestamp(), INSTANCE_FORMAT);
-    }
-
-    public String getClusterName() {
-        String value =  getValue(WorkflowExecutionArgs.CLUSTER_NAME);
-        if (EntityOperations.REPLICATE != getOperation()) {
-            return value;
-        }
-
-        return value.split(CLUSTER_NAME_SEPARATOR)[0];
-    }
-
-    public String getSrcClusterName() {
-        String value =  getValue(WorkflowExecutionArgs.CLUSTER_NAME);
-        if (EntityOperations.REPLICATE != getOperation()) {
-            return value;
-        }
-
-        String[] parts = value.split(CLUSTER_NAME_SEPARATOR);
-        if (parts.length != 2) {
-            throw new IllegalArgumentException("Replicated cluster pair is missing in " + value);
-        }
-
-        return parts[1];
-    }
-
-    public String getEntityName() {
-        return getValue(WorkflowExecutionArgs.ENTITY_NAME);
-    }
-
-    public String getEntityType() {
-        return getValue(WorkflowExecutionArgs.ENTITY_TYPE).toUpperCase();
-    }
-
-    public EntityOperations getOperation() {
-        if (getValue(WorkflowExecutionArgs.OPERATION) != null) {
-            return EntityOperations.valueOf(getValue(WorkflowExecutionArgs.OPERATION));
-        }
-        return EntityOperations.valueOf(getValue(WorkflowExecutionArgs.DATA_OPERATION));
-    }
-
-    public String getOutputFeedNames() {
-        return getValue(WorkflowExecutionArgs.OUTPUT_FEED_NAMES);
-    }
-
-    public String[] getOutputFeedNamesList() {
-        return getOutputFeedNames().split(OUTPUT_FEED_SEPARATOR);
-    }
-
-    public String getOutputFeedInstancePaths() {
-        return getValue(WorkflowExecutionArgs.OUTPUT_FEED_PATHS);
-    }
-
-    public String[] getOutputFeedInstancePathsList() {
-        return getOutputFeedInstancePaths().split(OUTPUT_FEED_SEPARATOR);
-    }
-
-    public String getInputFeedNames() {
-        return getValue(WorkflowExecutionArgs.INPUT_FEED_NAMES);
-    }
-
-    public String[] getInputFeedNamesList() {
-        return getInputFeedNames().split(INPUT_FEED_SEPARATOR);
-    }
-
-    public String getInputFeedInstancePaths() {
-        return getValue(WorkflowExecutionArgs.INPUT_FEED_PATHS);
-    }
-
-    public String[] getInputFeedInstancePathsList() {
-        return getInputFeedInstancePaths().split(INPUT_FEED_SEPARATOR);
-    }
-
-    public String getWorkflowEngineUrl() {
-        return getValue(WorkflowExecutionArgs.WF_ENGINE_URL);
-    }
-
-    public String getUserWorkflowEngine() {
-        return getValue(WorkflowExecutionArgs.USER_WORKFLOW_ENGINE);
-    }
-
-    public String getUserWorkflowVersion() {
-        return getValue(WorkflowExecutionArgs.USER_WORKFLOW_VERSION);
-    }
-
-    public String getWorkflowId() {
-        return getValue(WorkflowExecutionArgs.WORKFLOW_ID);
-    }
-
-    public String getWorkflowParentId() {
-        return getValue(WorkflowExecutionArgs.PARENT_ID);
-    }
-
-    public String getUserSubflowId() {
-        return getValue(WorkflowExecutionArgs.USER_SUBFLOW_ID);
-    }
-
-    public int getWorkflowRunId() {
-        return Integer.parseInt(getValue(WorkflowExecutionArgs.RUN_ID));
-    }
-
-    public String getWorkflowRunIdString() {
-        return String.valueOf(Integer.parseInt(getValue(WorkflowExecutionArgs.RUN_ID)));
-    }
-
-    public String getWorkflowUser() {
-        return getValue(WorkflowExecutionArgs.WORKFLOW_USER);
-    }
-
-    public long getExecutionCompletionTime() {
-
-        return creationTime;
-    }
-
-    public String getDatasourceName() { return getValue(WorkflowExecutionArgs.DATASOURCE_NAME); }
-
-    public long getWorkflowStartTime() {
-        return Long.parseLong(getValue(WorkflowExecutionArgs.WF_START_TIME));
-    }
-
-    public long getWorkflowEndTime() {
-        return Long.parseLong(getValue(WorkflowExecutionArgs.WF_END_TIME));
-    }
-
-
-    public Type getContextType() {
-        return Type.valueOf(getValue(WorkflowExecutionArgs.CONTEXT_TYPE));
-    }
-
-    public String getCounters() {
-        return getValue(WorkflowExecutionArgs.COUNTERS);
-    }
-
-    /**
-     * This method is invoked from within the workflow.
-     *
-     * @throws java.io.IOException
-     * @throws org.apache.falcon.FalconException
-     */
-    public void serialize() throws IOException, FalconException {
-        serialize(getContextFile());
-    }
-
-    /**
-     * This method is invoked from within the workflow.
-     *
-     * @param contextFile file to serialize the workflow execution metadata
-     * @throws org.apache.falcon.FalconException
-     */
-    public void serialize(String contextFile) throws FalconException {
-        LOG.info("Saving context to: [{}]", contextFile);
-        OutputStream out = null;
-        Path file = new Path(contextFile);
-        try {
-            FileSystem fs =
-                    actionJobConf == null ? HadoopClientFactory.get().createProxiedFileSystem(file.toUri())
-                                 : HadoopClientFactory.get().createProxiedFileSystem(file.toUri(), actionJobConf);
-            out = fs.create(file);
-            out.write(JSONValue.toJSONString(context).getBytes());
-        } catch (IOException e) {
-            throw new FalconException("Error serializing context to: " + contextFile,  e);
-        } finally {
-            if (out != null) {
-                try {
-                    out.close();
-                } catch (IOException ignore) {
-                    // ignore
-                }
-            }
-        }
-    }
-
-    @Override
-    public String toString() {
-        return "WorkflowExecutionContext{" + context.toString() + "}";
-    }
-
-    @SuppressWarnings("unchecked")
-    public static WorkflowExecutionContext deSerialize(String contextFile) throws FalconException {
-        try {
-            Path lineageDataPath = new Path(contextFile); // file has 777 permissions
-            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(
-                    lineageDataPath.toUri());
-
-            BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(lineageDataPath)));
-            return new WorkflowExecutionContext((Map<WorkflowExecutionArgs, String>) JSONValue.parse(in));
-        } catch (IOException e) {
-            throw new FalconException("Error opening context file: " + contextFile, e);
-        }
-    }
-
-    public static String getFilePath(String logDir, String entityName, String entityType,
-                                     EntityOperations operation) {
-        // needed by feed clean up
-        String parentSuffix = EntityType.PROCESS.name().equals(entityType)
-                || EntityOperations.REPLICATE == operation ? "" : "/context/";
-
-        // LOG_DIR is sufficiently unique
-        return new Path(logDir + parentSuffix, entityName + "-wf-post-exec-context.json").toString();
-    }
-
-
-    public static Path getCounterFile(String logDir) {
-        return new Path(logDir, "counter.txt");
-    }
-
-    public static String readCounters(FileSystem fs, Path counterFile) throws IOException{
-        StringBuilder counterBuffer = new StringBuilder();
-        BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(counterFile)));
-        try {
-            String line;
-            while ((line = in.readLine()) != null) {
-                counterBuffer.append(line);
-                counterBuffer.append(",");
-            }
-        } catch (IOException e) {
-            throw e;
-        } finally {
-            IOUtils.closeQuietly(in);
-        }
-
-        String counterString = counterBuffer.toString();
-        if (StringUtils.isNotBlank(counterString) && counterString.length() > 0) {
-            return counterString.substring(0, counterString.length() - 1);
-        } else {
-            return null;
-        }
-    }
-
-    public static WorkflowExecutionContext create(String[] args, Type type) throws FalconException {
-        return create(args, type, null);
-    }
-
-    public static WorkflowExecutionContext create(String[] args, Type type, Configuration conf) throws FalconException {
-        Map<WorkflowExecutionArgs, String> wfProperties = new HashMap<WorkflowExecutionArgs, String>();
-
-        try {
-            CommandLine cmd = getCommand(args);
-            for (WorkflowExecutionArgs arg : WorkflowExecutionArgs.values()) {
-                String optionValue = arg.getOptionValue(cmd);
-                if (StringUtils.isNotEmpty(optionValue)) {
-                    wfProperties.put(arg, optionValue);
-                }
-            }
-        } catch (ParseException e) {
-            throw new FalconException("Error parsing wf args", e);
-        }
-
-        WorkflowExecutionContext executionContext = new WorkflowExecutionContext(wfProperties);
-        executionContext.actionJobConf = conf;
-        executionContext.context.put(WorkflowExecutionArgs.CONTEXT_TYPE, type.name());
-        executionContext.context.put(WorkflowExecutionArgs.CONTEXT_FILE,
-                getFilePath(executionContext.getLogDir(), executionContext.getEntityName(),
-                        executionContext.getEntityType(), executionContext.getOperation()));
-        addCounterToWF(executionContext);
-
-        return executionContext;
-    }
-
-    private static void addCounterToWF(WorkflowExecutionContext executionContext) throws FalconException {
-        if (executionContext.hasWorkflowFailed()) {
-            LOG.info("Workflow Instance failed, counter will not be added: {}",
-                    executionContext.getWorkflowRunIdString());
-            return;
-        }
-
-        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(
-                new Path(executionContext.getLogDir()).toUri());
-        Path counterFile = getCounterFile(executionContext.getLogDir());
-        try {
-            if (fs.exists(counterFile)) {
-                String counters = readCounters(fs, counterFile);
-                if (StringUtils.isNotBlank(counters)) {
-                    executionContext.context.put(WorkflowExecutionArgs.COUNTERS, counters);
-                }
-            }
-        } catch (IOException e) {
-            LOG.error("Error in accessing counter file :" + e);
-        } finally {
-            try {
-                if (fs.exists(counterFile)) {
-                    fs.delete(counterFile, false);
-                }
-            } catch (IOException e) {
-                LOG.error("Unable to delete counter file: {}", e);
-            }
-        }
-    }
-
-    private static CommandLine getCommand(String[] arguments) throws ParseException {
-        Options options = new Options();
-
-        for (WorkflowExecutionArgs arg : WorkflowExecutionArgs.values()) {
-            addOption(options, arg, arg.isRequired());
-        }
-
-        return new GnuParser().parse(options, arguments, false);
-    }
-
-    private static void addOption(Options options, WorkflowExecutionArgs arg, boolean isRequired) {
-        Option option = arg.getOption();
-        option.setRequired(isRequired);
-        options.addOption(option);
-    }
-
-    public static WorkflowExecutionContext create(Map<WorkflowExecutionArgs, String> wfProperties) {
-        return WorkflowExecutionContext.create(wfProperties, Type.POST_PROCESSING);
-    }
-
-    public static WorkflowExecutionContext create(Map<WorkflowExecutionArgs, String> wfProperties, Type type) {
-        wfProperties.put(WorkflowExecutionArgs.CONTEXT_TYPE, type.name());
-        return new WorkflowExecutionContext(wfProperties);
-    }
-}
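
A minimal, hedged usage sketch (not part of this commit) of the class removed above, driven through its Map-based factory; the values are illustrative samples and only methods visible in the diff are used.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.falcon.workflow.WorkflowExecutionArgs;
    import org.apache.falcon.workflow.WorkflowExecutionContext;

    public class WorkflowContextSketch {
        public static void main(String[] args) {
            Map<WorkflowExecutionArgs, String> props = new HashMap<>();
            props.put(WorkflowExecutionArgs.CLUSTER_NAME, "primary-cluster");  // sample value
            props.put(WorkflowExecutionArgs.ENTITY_NAME, "sample-process");    // sample value
            props.put(WorkflowExecutionArgs.ENTITY_TYPE, "process");
            props.put(WorkflowExecutionArgs.NOMINAL_TIME, "2016-03-01-09-00"); // INSTANCE_FORMAT
            props.put(WorkflowExecutionArgs.STATUS, "SUCCEEDED");

            // create() stamps CONTEXT_TYPE into the map before wrapping it in a context.
            WorkflowExecutionContext context =
                    WorkflowExecutionContext.create(props, WorkflowExecutionContext.Type.POST_PROCESSING);

            System.out.println(context.hasWorkflowSucceeded());     // true
            System.out.println(context.getNominalTimeAsISO8601());  // e.g. 2016-03-01T09:00Z
        }
    }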

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionListener.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionListener.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionListener.java
deleted file mode 100644
index 7bf14f2..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionListener.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.falcon.FalconException;
-
-/**
- * A listener interface for workflow execution.
- */
-public interface WorkflowExecutionListener {
-
-    /**
-     * Invoked when a workflow succeeds.
-     * @param context
-     * @throws FalconException
-     */
-    void onSuccess(WorkflowExecutionContext context) throws FalconException;
-
-    /**
-     * Invoked when a workflow fails.
-     * @param context
-     * @throws FalconException
-     */
-    void onFailure(WorkflowExecutionContext context) throws FalconException;
-
-    /**
-     * Invoked on start of a workflow. Basically, when the workflow is RUNNING.
-     * @param context
-     * @throws FalconException
-     */
-    void onStart(WorkflowExecutionContext context) throws FalconException;
-
-    /**
-     * Invoked when a workflow is suspended.
-     * @param context
-     * @throws FalconException
-     */
-    void onSuspend(WorkflowExecutionContext context) throws FalconException;
-
-    /**
-     * Invoked when a workflow is in waiting state.
-     * @param context
-     * @throws FalconException
-     */
-    void onWait(WorkflowExecutionContext context) throws FalconException;
-}
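
A hedged illustration of implementing the listener interface removed above (class name is invented). Such an implementation is typically named in the workflow.execution.listeners startup property and instantiated by WorkflowJobEndNotificationService, shown in the next file.

    import org.apache.falcon.FalconException;
    import org.apache.falcon.workflow.WorkflowExecutionContext;
    import org.apache.falcon.workflow.WorkflowExecutionListener;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class AuditingExecutionListener implements WorkflowExecutionListener {
        private static final Logger LOG = LoggerFactory.getLogger(AuditingExecutionListener.class);

        @Override
        public void onSuccess(WorkflowExecutionContext context) throws FalconException {
            LOG.info("Workflow {} succeeded at {}", context.getWorkflowId(), context.getNominalTimeAsISO8601());
        }

        @Override
        public void onFailure(WorkflowExecutionContext context) throws FalconException {
            LOG.warn("Workflow {} failed", context.getWorkflowId());
        }

        @Override
        public void onStart(WorkflowExecutionContext context) throws FalconException {
            // No-op; a real listener might record lineage or metrics here.
        }

        @Override
        public void onSuspend(WorkflowExecutionContext context) throws FalconException {
            // No-op.
        }

        @Override
        public void onWait(WorkflowExecutionContext context) throws FalconException {
            // No-op.
        }
    }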

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
deleted file mode 100644
index b692258..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.aspect.GenericAlert;
-import org.apache.falcon.entity.EntityNotRegisteredException;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.service.FalconService;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.util.StartupProperties;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Date;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * A workflow job end notification service.
- */
-public class WorkflowJobEndNotificationService implements FalconService {
-
-    private static final Logger LOG = LoggerFactory.getLogger(WorkflowJobEndNotificationService.class);
-
-    public static final String SERVICE_NAME = WorkflowJobEndNotificationService.class.getSimpleName();
-
-    private Set<WorkflowExecutionListener> listeners = new LinkedHashSet<WorkflowExecutionListener>();
-
-    // Maintain a cache of context built, so we don't have to query Oozie for every state change.
-    private Map<String, Properties> contextMap = new ConcurrentHashMap<>();
-
-    @Override
-    public String getName() {
-        return SERVICE_NAME;
-    }
-
-    // Mainly for test
-    Map<String, Properties> getContextMap() {
-        return contextMap;
-    }
-
-    @Override
-    public void init() throws FalconException {
-        String listenerClassNames = StartupProperties.get().getProperty(
-                "workflow.execution.listeners");
-        if (StringUtils.isEmpty(listenerClassNames)) {
-            return;
-        }
-
-        for (String listenerClassName : listenerClassNames.split(",")) {
-            listenerClassName = listenerClassName.trim();
-            if (listenerClassName.isEmpty()) {
-                continue;
-            }
-            WorkflowExecutionListener listener = ReflectionUtils.getInstanceByClassName(listenerClassName);
-            registerListener(listener);
-        }
-    }
-
-    @Override
-    public void destroy() throws FalconException {
-        listeners.clear();
-    }
-
-    public void registerListener(WorkflowExecutionListener listener) {
-        listeners.add(listener);
-    }
-
-    public void unregisterListener(WorkflowExecutionListener listener) {
-        listeners.remove(listener);
-    }
-
-    public void notifyFailure(WorkflowExecutionContext context) throws FalconException {
-        notifyWorkflowEnd(context);
-    }
-
-    public void notifySuccess(WorkflowExecutionContext context) throws FalconException {
-        notifyWorkflowEnd(context);
-    }
-
-    public void notifyStart(WorkflowExecutionContext context) throws FalconException {
-        // Start notifications can only be from Oozie JMS notifications
-        if (!updateContextFromWFConf(context)) {
-            return;
-        }
-        LOG.debug("Sending workflow start notification to listeners with context : {} ", context);
-        for (WorkflowExecutionListener listener : listeners) {
-            try {
-                listener.onStart(context);
-            } catch (Throwable t) {
-                // do not rethrow as other listeners do not get a chance
-                LOG.error("Error in listener {}", listener.getClass().getName(), t);
-            }
-        }
-    }
-
-    public void notifySuspend(WorkflowExecutionContext context) throws FalconException {
-        // Suspend notifications can only be from Oozie JMS notifications
-        if (!updateContextFromWFConf(context)) {
-            return;
-        }
-        LOG.debug("Sending workflow suspend notification to listeners with context : {} ", context);
-        for (WorkflowExecutionListener listener : listeners) {
-            try {
-                listener.onSuspend(context);
-            } catch (Throwable t) {
-                // do not rethrow as other listeners do not get a chance
-                LOG.error("Error in listener {}", listener.getClass().getName(), t);
-            }
-        }
-
-        instrumentAlert(context);
-        contextMap.remove(context.getWorkflowId());
-    }
-
-    public void notifyWait(WorkflowExecutionContext context) throws FalconException {
-        // Wait notifications can only be from Oozie JMS notifications
-        LOG.debug("Sending workflow wait notification to listeners with context : {} ", context);
-        for (WorkflowExecutionListener listener : listeners) {
-            try {
-                listener.onWait(context);
-            } catch (Throwable t) {
-                // do not rethrow as other listeners do not get a chance
-                LOG.error("Error in listener {}", listener.getClass().getName(), t);
-            }
-        }
-    }
-
-    // The method retrieves the conf from the cache if it is in cache.
-    // Else, queries WF Engine to retrieve the conf of the workflow
-    private boolean updateContextFromWFConf(WorkflowExecutionContext context) throws FalconException {
-        Properties wfProps = contextMap.get(context.getWorkflowId());
-        if (wfProps == null) {
-            Entity entity = null;
-            try {
-                entity = EntityUtil.getEntity(context.getEntityType(), context.getEntityName());
-            } catch (EntityNotRegisteredException e) {
-                // Entity no longer exists. No need to notify.
-                LOG.debug("Entity {} of type {} doesn't exist in config store. Notification Ignored.",
-                        context.getEntityName(), context.getEntityType());
-                contextMap.remove(context.getWorkflowId());
-                return false;
-            }
-            for (String cluster : EntityUtil.getClustersDefinedInColos(entity)) {
-                try {
-                    InstancesResult.Instance[] instances = WorkflowEngineFactory.getWorkflowEngine(entity)
-                            .getJobDetails(cluster, context.getWorkflowId()).getInstances();
-                    if (instances != null && instances.length > 0) {
-                        wfProps = getWFProps(instances[0].getWfParams());
-                        // Required by RetryService. But, is not part of conf.
-                        wfProps.setProperty(WorkflowExecutionArgs.RUN_ID.getName(),
-                                Integer.toString(instances[0].getRunId()));
-                    }
-                } catch (FalconException e) {
-                    // Do Nothing. Move on to the next cluster.
-                    continue;
-                }
-                contextMap.put(context.getWorkflowId(), wfProps);
-            }
-        }
-
-        // No extra props to enhance the context with.
-        if (wfProps == null || wfProps.isEmpty()) {
-            return true;
-        }
-
-        for (WorkflowExecutionArgs arg : WorkflowExecutionArgs.values()) {
-            if (wfProps.containsKey(arg.getName())) {
-                context.setValue(arg, wfProps.getProperty(arg.getName()));
-            }
-        }
-        return true;
-    }
-
-    private Properties getWFProps(InstancesResult.KeyValuePair[] wfParams) {
-        Properties props = new Properties();
-        for (InstancesResult.KeyValuePair kv : wfParams) {
-            props.put(kv.getKey(), kv.getValue());
-        }
-        return props;
-    }
-
-    // This method handles both success and failure notifications.
-    private void notifyWorkflowEnd(WorkflowExecutionContext context) throws FalconException {
-        // Need to distinguish notification from post processing for backward compatibility
-        if (context.getContextType() == WorkflowExecutionContext.Type.POST_PROCESSING) {
-            boolean engineNotifEnabled = false;
-            try {
-                engineNotifEnabled = WorkflowEngineFactory.getWorkflowEngine()
-                        .isNotificationEnabled(context.getClusterName(), context.getWorkflowId());
-            } catch (FalconException e) {
-                LOG.debug("Received error while checking if notification is enabled. "
-                        + "Hence, assuming notification is not enabled.");
-            }
-            // Ignore the message from post processing as there will be one more from Oozie.
-            if (engineNotifEnabled) {
-                LOG.info("Ignoring message from post processing as engine notification is enabled.");
-                return;
-            } else {
-                updateContextWithTime(context);
-            }
-        } else {
-            if (!updateContextFromWFConf(context)) {
-                return;
-            }
-        }
-
-        LOG.debug("Sending workflow end notification to listeners with context : {} ", context);
-
-        for (WorkflowExecutionListener listener : listeners) {
-            try {
-                if (context.hasWorkflowSucceeded()) {
-                    listener.onSuccess(context);
-                    instrumentAlert(context);
-                } else {
-                    listener.onFailure(context);
-                    if (context.hasWorkflowBeenKilled() || context.hasWorkflowFailed()) {
-                        instrumentAlert(context);
-                    }
-                }
-            } catch (Throwable t) {
-                // do not rethrow as other listeners do not get a chance
-                LOG.error("Error in listener {}", listener.getClass().getName(), t);
-            }
-        }
-
-        contextMap.remove(context.getWorkflowId());
-    }
-
-    // In case of notifications coming from post processing, start and end time need to be populated.
-    private void updateContextWithTime(WorkflowExecutionContext context) {
-        try {
-            InstancesResult result = WorkflowEngineFactory.getWorkflowEngine()
-                    .getJobDetails(context.getClusterName(), context.getWorkflowId());
-            Date startTime = result.getInstances()[0].startTime;
-            Date endTime = result.getInstances()[0].endTime;
-            Date now = new Date();
-            if (startTime == null) {
-                startTime = now;
-            }
-            if (endTime == null) {
-                endTime = now;
-            }
-            context.setValue(WorkflowExecutionArgs.WF_START_TIME, Long.toString(startTime.getTime()));
-            context.setValue(WorkflowExecutionArgs.WF_END_TIME, Long.toString(endTime.getTime()));
-        } catch(FalconException e) {
-            LOG.error("Unable to retrieve job details for " + context.getWorkflowId() + " on cluster "
-                    + context.getClusterName(), e);
-        }
-    }
-
-    private void instrumentAlert(WorkflowExecutionContext context) {
-        String clusterName = context.getClusterName();
-        String entityName = context.getEntityName();
-        String entityType = context.getEntityType();
-        String operation = context.getOperation().name();
-        String workflowId = context.getWorkflowId();
-        String workflowUser = context.getWorkflowUser();
-        String nominalTime = context.getNominalTimeAsISO8601();
-        String runId = String.valueOf(context.getWorkflowRunId());
-        Date now = new Date();
-        // Start and/or End time may not be set in case of workflow suspend
-        Date endTime;
-        if (context.getWorkflowEndTime() == 0) {
-            endTime = now;
-        } else {
-            endTime = new Date(context.getWorkflowEndTime());
-        }
-
-        Date startTime;
-        if (context.getWorkflowStartTime() == 0) {
-            startTime = now;
-        } else {
-            startTime = new Date(context.getWorkflowStartTime());
-        }
-        Long duration = (endTime.getTime() - startTime.getTime()) * 1000000;
-
-        if (context.hasWorkflowFailed()) {
-            GenericAlert.instrumentFailedInstance(clusterName, entityType,
-                    entityName, nominalTime, workflowId, workflowUser, runId, operation,
-                    SchemaHelper.formatDateUTC(startTime), "", "", duration);
-        } else {
-            GenericAlert.instrumentSucceededInstance(clusterName, entityType,
-                    entityName, nominalTime, workflowId, workflowUser, runId, operation,
-                    SchemaHelper.formatDateUTC(startTime), duration);
-        }
-    }
-}
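
A small wiring sketch (illustrative; AuditingExecutionListener is the hypothetical listener sketched earlier). In a running server the listeners come from the workflow.execution.listeners startup property and are loaded in init(); registerListener() offers the same hook programmatically.

    import org.apache.falcon.FalconException;
    import org.apache.falcon.workflow.WorkflowJobEndNotificationService;

    public class NotificationWiringSketch {
        public static void main(String[] args) throws FalconException {
            WorkflowJobEndNotificationService service = new WorkflowJobEndNotificationService();
            service.init();  // reads workflow.execution.listeners from startup.properties, if set
            service.registerListener(new AuditingExecutionListener());  // hypothetical listener
            // On a terminal state, the JMS or post-processing path calls
            // service.notifySuccess(context) / service.notifyFailure(context),
            // which fan the WorkflowExecutionContext out to every registered listener.
        }
    }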

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java b/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java
deleted file mode 100644
index 4d8402a..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/engine/AbstractWorkflowEngine.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow.engine;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.LifeCycle;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-
-/**
- * Workflow engine should minimally support the
- * following operations.
- */
-public abstract class AbstractWorkflowEngine {
-
-    public static final String NAME_NODE = "nameNode";
-    public static final String JOB_TRACKER = "jobTracker";
-
-    protected Set<WorkflowEngineActionListener> listeners = new HashSet<WorkflowEngineActionListener>();
-
-    public void registerListener(WorkflowEngineActionListener listener) {
-        listeners.add(listener);
-    }
-
-    public abstract boolean isAlive(Cluster cluster) throws FalconException;
-
-    public abstract void schedule(Entity entity, Boolean skipDryRun, Map<String, String> properties)
-        throws FalconException;
-
-    public abstract String suspend(Entity entity) throws FalconException;
-
-    public abstract String resume(Entity entity) throws FalconException;
-
-    public abstract String delete(Entity entity) throws FalconException;
-
-    public abstract String delete(Entity entity, String cluster) throws FalconException;
-
-    public abstract String reRun(String cluster, String wfId, Properties props, boolean isForced)
-        throws FalconException;
-
-    public abstract void dryRun(Entity entity, String clusterName, Boolean skipDryRun) throws FalconException;
-
-    public abstract boolean isActive(Entity entity) throws FalconException;
-
-    public abstract boolean isSuspended(Entity entity) throws FalconException;
-
-    public abstract boolean isCompleted(Entity entity) throws FalconException;
-
-    public abstract InstancesResult getRunningInstances(Entity entity,
-                                                        List<LifeCycle> lifeCycles) throws FalconException;
-
-    public abstract InstancesResult killInstances(Entity entity, Date start, Date end, Properties props,
-                                                  List<LifeCycle> lifeCycles) throws FalconException;
-
-    public abstract InstancesResult reRunInstances(Entity entity, Date start, Date end, Properties props,
-                                                   List<LifeCycle> lifeCycles, Boolean isForced) throws FalconException;
-
-    public abstract InstancesResult suspendInstances(Entity entity, Date start, Date end, Properties props,
-                                                     List<LifeCycle> lifeCycles) throws FalconException;
-
-    public abstract InstancesResult resumeInstances(Entity entity, Date start, Date end, Properties props,
-                                                    List<LifeCycle> lifeCycles) throws FalconException;
-
-    public abstract InstancesResult getStatus(Entity entity, Date start, Date end,
-                                              List<LifeCycle> lifeCycles, Boolean allAttempts) throws FalconException;
-
-    public abstract InstancesSummaryResult getSummary(Entity entity, Date start, Date end,
-                                                      List<LifeCycle> lifeCycles) throws FalconException;
-
-    public abstract String update(Entity oldEntity, Entity newEntity,
-                                  String cluster, Boolean skipDryRun) throws FalconException;
-
-    public abstract String touch(Entity entity, String cluster, Boolean skipDryRun) throws FalconException;
-
-    public abstract String getWorkflowStatus(String cluster, String jobId) throws FalconException;
-
-    public abstract Properties getWorkflowProperties(String cluster, String jobId) throws FalconException;
-
-    public abstract InstancesResult getJobDetails(String cluster, String jobId) throws FalconException;
-
-    public abstract InstancesResult getInstanceParams(Entity entity, Date start, Date end,
-                                                      List<LifeCycle> lifeCycles) throws FalconException;
-
-    public abstract boolean isNotificationEnabled(String cluster, String jobID) throws FalconException;
-
-    public abstract Boolean isWorkflowKilledByUser(String cluster, String jobId) throws FalconException;
-
-
-    /**
-     * Returns the short name of the Workflow Engine.
-     * @return
-     */
-    public abstract String getName();
-}
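
A caller-side sketch of the abstract API above. It assumes WorkflowEngineFactory.getWorkflowEngine() (referenced elsewhere in this diff but not shown) returns an AbstractWorkflowEngine, and that the entity is already registered in the config store; passing null for the lifecycles is a simplification.

    import org.apache.falcon.FalconException;
    import org.apache.falcon.entity.EntityUtil;
    import org.apache.falcon.entity.v0.Entity;
    import org.apache.falcon.resource.InstancesResult;
    import org.apache.falcon.workflow.WorkflowEngineFactory;
    import org.apache.falcon.workflow.engine.AbstractWorkflowEngine;

    public class EngineQuerySketch {
        public static InstancesResult runningInstances(String entityType, String entityName)
            throws FalconException {
            Entity entity = EntityUtil.getEntity(entityType, entityName);
            AbstractWorkflowEngine engine = WorkflowEngineFactory.getWorkflowEngine();
            return engine.getRunningInstances(entity, null);  // null = default lifecycle (simplification)
        }
    }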

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/engine/WorkflowEngineActionListener.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/engine/WorkflowEngineActionListener.java b/common/src/main/java/org/apache/falcon/workflow/engine/WorkflowEngineActionListener.java
deleted file mode 100644
index 2a1cbd4..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/engine/WorkflowEngineActionListener.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow.engine;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.Entity;
-
-/**
- * Listener that will be notified before and after
- * workflow life cycle operations are performed.
- */
-public interface WorkflowEngineActionListener {
-
-    void beforeSchedule(Entity entity, String cluster) throws FalconException;
-
-    void afterSchedule(Entity entity, String cluster) throws FalconException;
-
-    void beforeDelete(Entity entity, String cluster) throws FalconException;
-
-    void afterDelete(Entity entity, String cluster) throws FalconException;
-
-    void beforeSuspend(Entity entity, String cluster) throws FalconException;
-
-    void afterSuspend(Entity entity, String cluster) throws FalconException;
-
-    void beforeResume(Entity entity, String cluster) throws FalconException;
-
-    void afterResume(Entity entity, String cluster) throws FalconException;
-}
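
A hypothetical no-op implementation (names invented) of the listener above; an engine can be made aware of it through AbstractWorkflowEngine.registerListener().

    import org.apache.falcon.FalconException;
    import org.apache.falcon.entity.v0.Entity;
    import org.apache.falcon.workflow.engine.WorkflowEngineActionListener;

    public class LoggingActionListener implements WorkflowEngineActionListener {
        @Override
        public void beforeSchedule(Entity entity, String cluster) throws FalconException {
            System.out.println("Scheduling " + entity.getName() + " on " + cluster);
        }
        @Override
        public void afterSchedule(Entity entity, String cluster) throws FalconException { }
        @Override
        public void beforeDelete(Entity entity, String cluster) throws FalconException { }
        @Override
        public void afterDelete(Entity entity, String cluster) throws FalconException { }
        @Override
        public void beforeSuspend(Entity entity, String cluster) throws FalconException { }
        @Override
        public void afterSuspend(Entity entity, String cluster) throws FalconException { }
        @Override
        public void beforeResume(Entity entity, String cluster) throws FalconException { }
        @Override
        public void afterResume(Entity entity, String cluster) throws FalconException { }
    }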

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/util/OozieActionConfigurationHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/util/OozieActionConfigurationHelper.java b/common/src/main/java/org/apache/falcon/workflow/util/OozieActionConfigurationHelper.java
deleted file mode 100644
index 3f07c3c..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/util/OozieActionConfigurationHelper.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow.util;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Shell;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.StringWriter;
-
-/**
- * Utility to read oozie action conf at oozie.action.conf.xml.
- */
-public final class OozieActionConfigurationHelper {
-
-    private static final Logger LOG = LoggerFactory.getLogger(OozieActionConfigurationHelper.class);
-
-    private OozieActionConfigurationHelper() {
-    }
-
-    public static Configuration createActionConf() throws IOException {
-        Configuration conf = new Configuration();
-        Path confPath = new Path("file:///" + System.getProperty("oozie.action.conf.xml"));
-
-        final boolean actionConfExists = confPath.getFileSystem(conf).exists(confPath);
-        LOG.info("Oozie Action conf {} found ? {}", confPath, actionConfExists);
-        if (actionConfExists) {
-            LOG.info("Oozie Action conf found, adding path={}, conf={}", confPath, conf.toString());
-            conf.addResource(confPath);
-            dumpConf(conf, "oozie action conf ");
-        }
-
-        String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
-        if (tokenFile != null) {
-            if (Shell.WINDOWS) {
-                if (tokenFile.charAt(0) == '"') {
-                    tokenFile = tokenFile.substring(1);
-                }
-                if (tokenFile.charAt(tokenFile.length() - 1) == '"') {
-                    tokenFile = tokenFile.substring(0, tokenFile.length() - 1);
-                }
-            }
-
-            conf.set("mapreduce.job.credentials.binary", tokenFile);
-            System.setProperty("mapreduce.job.credentials.binary", tokenFile);
-            conf.set("tez.credentials.path", tokenFile);
-            System.setProperty("tez.credentials.path", tokenFile);
-        }
-
-        conf.set("datanucleus.plugin.pluginRegistryBundleCheck", "LOG");
-        conf.setBoolean("hive.exec.mode.local.auto", false);
-
-        return conf;
-    }
-
-    public static void dumpConf(Configuration conf, String message) throws IOException {
-        StringWriter writer = new StringWriter();
-        Configuration.dumpConfiguration(conf, writer);
-        LOG.info(message + " {}", writer);
-    }
-}
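
As context for the removed helper, a minimal usage sketch of the two public methods shown above; the wrapping class is hypothetical, and it assumes the oozie.action.conf.xml system property is set by the Oozie launcher, as the helper expects.

```java
// A minimal sketch, assuming the OozieActionConfigurationHelper shown in the diff above;
// the surrounding main class is hypothetical and only illustrates the call sequence.
package org.apache.falcon.workflow.util;

import org.apache.hadoop.conf.Configuration;

import java.io.IOException;

public final class OozieActionConfExample {

    private OozieActionConfExample() {
    }

    public static void main(String[] args) throws IOException {
        // createActionConf() reads oozie.action.conf.xml (if present) and the
        // HADOOP_TOKEN_FILE_LOCATION credentials into a Hadoop Configuration.
        Configuration conf = OozieActionConfigurationHelper.createActionConf();

        // dumpConf() logs the fully resolved configuration for debugging.
        OozieActionConfigurationHelper.dumpConf(conf, "resolved action conf");
    }
}
```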

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java b/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java
deleted file mode 100644
index 05f248e..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow.util;
-
-/**
- * Oozie Constants used across multiple modules.
- */
-public final class OozieConstants {
-    /**
-     * Constant for the oozie running in local.
-     */
-    public static final String LOCAL_OOZIE = "localoozie";
-
-    private OozieConstants() {
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/common/src/main/resources/log4j.xml b/common/src/main/resources/log4j.xml
deleted file mode 100644
index 75c8267..0000000
--- a/common/src/main/resources/log4j.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-    <appender name="console" class="org.apache.log4j.ConsoleAppender">
-        <param name="Target" value="System.out"/>
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
-        </layout>
-    </appender>
-
-    <appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
-        <param name="File" value="${user.dir}/target/logs/application.log"/>
-        <param name="Append" value="true"/>
-        <param name="Threshold" value="debug"/>
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
-        </layout>
-    </appender>
-
-    <appender name="AUDIT" class="org.apache.log4j.DailyRollingFileAppender">
-        <param name="File" value="${user.dir}/target/logs/audit.log"/>
-        <param name="Append" value="true"/>
-        <param name="Threshold" value="debug"/>
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d %x %m%n"/>
-        </layout>
-    </appender>
-
-    <appender name="METRIC" class="org.apache.log4j.DailyRollingFileAppender">
-        <param name="File" value="${user.dir}/target/logs/metric.log"/>
-        <param name="Append" value="true"/>
-        <param name="Threshold" value="debug"/>
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d %m%n"/>
-        </layout>
-    </appender>
-
-    <appender name="ALERT" class="org.apache.log4j.DailyRollingFileAppender">
-        <param name="File" value="${falcon.log.dir}/${falcon.app.type}.alerts.log"/>
-        <param name="Append" value="true"/>
-        <param name="Threshold" value="debug"/>
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d %m%n"/>
-        </layout>
-    </appender>
-
-    <logger name="org.apache.falcon" additivity="false">
-        <level value="debug"/>
-        <appender-ref ref="FILE"/>
-    </logger>
-
-    <logger name="AUDIT">
-        <level value="info"/>
-        <appender-ref ref="AUDIT"/>
-    </logger>
-
-    <logger name="METRIC">
-        <level value="info"/>
-        <appender-ref ref="METRIC"/>
-    </logger>
-
-    <root>
-        <priority value="info"/>
-        <appender-ref ref="console"/>
-    </root>
-
-</log4j:configuration>
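
For reference, the named AUDIT and METRIC loggers declared in the removed log4j.xml above would typically be looked up from application code as sketched below; the class is a hypothetical illustration, not part of the removed source.

```java
// A minimal sketch, assuming the log4j.xml shown in the diff above is on the classpath;
// the class is hypothetical and only shows how the named loggers are obtained.
import org.apache.log4j.Logger;

public final class NamedLoggerExample {

    private static final Logger AUDIT = Logger.getLogger("AUDIT");
    private static final Logger METRIC = Logger.getLogger("METRIC");

    private NamedLoggerExample() {
    }

    public static void main(String[] args) {
        // Messages at INFO and above are routed to the AUDIT and METRIC appenders respectively.
        AUDIT.info("user=falcon action=submit entity=sample-process");
        METRIC.info("submit.time.ms=42");
    }
}
```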

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/resources/runtime.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/runtime.properties b/common/src/main/resources/runtime.properties
deleted file mode 100644
index 643559e..0000000
--- a/common/src/main/resources/runtime.properties
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-*.domain=debug
-
-*.falcon.parentworkflow.retry.max=3
-*.falcon.parentworkflow.retry.interval.secs=1
-
-*.falcon.replication.workflow.maxmaps=5
-*.falcon.replication.workflow.mapbandwidth=100
-*.webservices.default.results.per.page=10
-
-# If true, do not run retention past feedCluster validity end time.
-# This will retain recent instances beyond feedCluster validity end time.
-*.falcon.retention.keep.instances.beyond.validity=true
-
-# Default configs to handle replication for late arriving feeds.
-*.feed.late.allowed=true
-*.feed.late.frequency=hours(3)
-*.feed.late.policy=exp-backoff
-
-# If true, Falcon skips oozie dryrun while scheduling entities.
-*.falcon.skip.dryrun=false
-
-######### Proxyuser Configuration Start #########
-
-#List of hosts the '#USER#' user is allowed to perform 'doAs' operations from. The '#USER#' must be replaced with the
-#username of the user who is allowed to perform 'doAs' operations. The value can be the '*' wildcard or a list of
-#comma separated hostnames
-
-*.falcon.service.ProxyUserService.proxyuser.#USER#.hosts=*
-
-#List of groups the '#USER#' user is allowed to perform 'doAs' operations as. The '#USER#' must be replaced with the
-#username of the user who is allowed to perform 'doAs' operations. The value can be the '*' wildcard or a list of
-#comma separated groups
-
-*.falcon.service.ProxyUserService.proxyuser.#USER#.groups=*
-
-######### Proxyuser Configuration End #########
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/resources/startup.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/startup.properties b/common/src/main/resources/startup.properties
deleted file mode 100644
index 2497cce..0000000
--- a/common/src/main/resources/startup.properties
+++ /dev/null
@@ -1,306 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-*.domain=debug
-
-######### Implementation classes #########
-## DONT MODIFY UNLESS SURE ABOUT CHANGE ##
-
-*.workflow.engine.impl=org.apache.falcon.workflow.engine.OozieWorkflowEngine
-*.lifecycle.engine.impl=org.apache.falcon.lifecycle.engine.oozie.OoziePolicyBuilderFactory
-*.oozie.process.workflow.builder=org.apache.falcon.workflow.OozieProcessWorkflowBuilder
-*.oozie.feed.workflow.builder=org.apache.falcon.workflow.OozieFeedWorkflowBuilder
-*.SchedulableEntityManager.impl=org.apache.falcon.resource.SchedulableEntityManager
-*.ConfigSyncService.impl=org.apache.falcon.resource.ConfigSyncService
-*.ProcessInstanceManager.impl=org.apache.falcon.resource.InstanceManager
-*.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
-
-##### Falcon Services #####
-*.application.services=org.apache.falcon.security.AuthenticationInitializationService,\
-                        org.apache.falcon.workflow.WorkflowJobEndNotificationService, \
-                        org.apache.falcon.service.ProcessSubscriberService,\
-                        org.apache.falcon.service.FeedSLAMonitoringService,\
-                        org.apache.falcon.service.LifecyclePolicyMap,\
-                        org.apache.falcon.entity.store.ConfigurationStore,\
-                        org.apache.falcon.rerun.service.RetryService,\
-                        org.apache.falcon.rerun.service.LateRunService,\
-                        org.apache.falcon.metadata.MetadataMappingService,\
-                        org.apache.falcon.service.LogCleanupService,\
-                        org.apache.falcon.service.GroupsService,\
-                        org.apache.falcon.service.ProxyUserService,\
-                        org.apache.falcon.adfservice.ADFProviderService
-## If you wish to use the Falcon native scheduler, add the commented out services below to application.services ##
-#                        org.apache.falcon.notification.service.impl.JobCompletionService,\
-#                        org.apache.falcon.notification.service.impl.SchedulerService,\
-#                        org.apache.falcon.notification.service.impl.AlarmService,\
-#                        org.apache.falcon.notification.service.impl.DataAvailabilityService,\
-#                        org.apache.falcon.execution.FalconExecutionService,\
-#                        org.apache.falcon.state.store.service.FalconJPAService
-
-
-# List of Lifecycle policies configured.
-*.falcon.feed.lifecycle.policies=org.apache.falcon.lifecycle.retention.AgeBasedDelete
-# List of builders for the policies.
-*.falcon.feed.lifecycle.policy.builders=org.apache.falcon.lifecycle.engine.oozie.retention.AgeBasedDeleteBuilder
-##### Falcon Configuration Store Change listeners #####
-*.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
-                        org.apache.falcon.entity.ColoClusterRelation,\
-                        org.apache.falcon.group.FeedGroupMap,\
-                        org.apache.falcon.entity.store.FeedLocationStore,\
-                        org.apache.falcon.service.FeedSLAMonitoringService,\
-                        org.apache.falcon.service.SharedLibraryHostingService
-## If you wish to use Falcon native scheduler, add the State store as a configstore listener. ##
-#                       org.apache.falcon.state.store.jdbc.JdbcStateStore
-
-##### JMS MQ Broker Implementation class #####
-*.broker.impl.class=org.apache.activemq.ActiveMQConnectionFactory
-
-##### List of shared libraries for Falcon workflows #####
-*.shared.libs=activemq-all,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3
-
-##### Workflow Job Execution Completion listeners #####
-*.workflow.execution.listeners=
-
-######### Implementation classes #########
-
-
-######### System startup parameters #########
-
-# Location of libraries that are shipped to Hadoop
-*.system.lib.location=${FALCON_HOME}/sharedlibs
-
-# Location to store user entity configurations
-
-#Configurations used in UTs
-debug.config.store.uri=file://${user.dir}/target/store
-#Location to store state of Feed SLA monitoring service
-debug.feed.sla.service.store.uri= file://${user.dir}/target/data/sla/pendingfeedinstances
-debug.config.oozie.conf.uri=${user.dir}/target/oozie
-debug.system.lib.location=${system.lib.location}
-debug.broker.url=vm://localhost
-debug.retry.recorder.path=${user.dir}/target/retry
-debug.libext.feed.retention.paths=${falcon.libext}
-debug.libext.feed.replication.paths=${falcon.libext}
-debug.libext.process.paths=${falcon.libext}
-
-#Configurations used in ITs
-it.config.store.uri=file://${user.dir}/target/store
-it.config.oozie.conf.uri=${user.dir}/target/oozie
-it.system.lib.location=${system.lib.location}
-it.broker.url=tcp://localhost:61616
-it.retry.recorder.path=${user.dir}/target/retry
-it.libext.feed.retention.paths=${falcon.libext}
-it.libext.feed.replication.paths=${falcon.libext}
-it.libext.process.paths=${falcon.libext}
-it.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandler
-
-*.falcon.cleanup.service.frequency=minutes(5)
-
-######### Properties for Feed SLA Monitoring #########
-# frequency of serialization for the state of FeedSLAMonitoringService - 1 hour
-*.feed.sla.serialization.frequency.millis=3600000
-
-# Maximum number of pending instances per feed that will be recorded. After this, older instances will be removed in
-# a FIFO fashion.
-*.feed.sla.queue.size=288
-
-# Do not change unless really sure
-# Frequency in seconds of "status check" for pending feed instances, default is 10 mins = 10 * 60
-*.feed.sla.statusCheck.frequency.seconds=600
-
-# Do not change unless really sure
-# Time duration (in milliseconds) into the future for generating pending feed instances.
-# In every cycle, pending feed instances are added for monitoring up to this time in the future.
-# It must be more than the statusCheck frequency; default is 15 mins = 15 * 60 * 1000
-*.feed.sla.lookAheadWindow.millis=900000
-
-
-######### Properties for configuring JMS provider - activemq #########
-# Default Active MQ url
-*.broker.url=tcp://localhost:61616
-
-# Default time-to-live for a JMS message: 3 days (time in minutes)
-*.broker.ttlInMins=4320
-*.entity.topic=FALCON.ENTITY.TOPIC
-*.max.retry.failure.count=1
-*.retry.recorder.path=${user.dir}/logs/retry
-
-######### Properties for configuring iMon client and metric #########
-*.internal.queue.size=1000
-
-
-######### Graph Database Properties #########
-# Graph implementation
-*.falcon.graph.blueprints.graph=com.thinkaurelius.titan.core.TitanFactory
-
-# Graph Storage
-*.falcon.graph.storage.directory=${user.dir}/target/graphdb
-*.falcon.graph.storage.backend=berkeleyje
-*.falcon.graph.serialize.path=${user.dir}/target/graphdb
-*.falcon.graph.preserve.history=false
-*.falcon.graph.transaction.retry.count=3
-*.falcon.graph.transaction.retry.delay=5
-
-# Uncomment and override the following properties for enabling metrics for titan db and pushing them to graphite. You
-# can use other reporters like ganglia also.
-# Refer to (http://thinkaurelius.github.io/titan/wikidoc/0.4.2/Titan-Performance-and-Monitoring) for finding the
-# relevant configurations for your use case. NOTE: you have to prefix all the properties with "*.falcon.graph."
-# *.falcon.graph.storage.enable-basic-metrics = true
-# Required; IP or hostname string
-# *.falcon.graph.metrics.graphite.hostname = 192.168.0.1
-# Required; specify logging interval in milliseconds
-# *.falcon.graph.metrics.graphite.interval = 60000
-
-######### Authentication Properties #########
-
-# Authentication type must be specified: simple|kerberos
-*.falcon.authentication.type=simple
-
-##### Service Configuration
-
-# Indicates the Kerberos principal to be used in Falcon Service.
-*.falcon.service.authentication.kerberos.principal=
-
-# Location of the keytab file with the credentials for the Service principal.
-*.falcon.service.authentication.kerberos.keytab=
-
-# name node principal to talk to config store
-*.dfs.namenode.kerberos.principal=
-
-##### SPNEGO Configuration
-
-# Authentication type must be specified: simple|kerberos|<class>
-# org.apache.falcon.security.RemoteUserInHeaderBasedAuthenticationHandler can be used for backwards compatibility
-*.falcon.http.authentication.type=simple
-
-# Indicates how long (in seconds) an authentication token is valid before it has to be renewed.
-*.falcon.http.authentication.token.validity=36000
-
-# The signature secret for signing the authentication tokens.
-*.falcon.http.authentication.signature.secret=falcon
-
-# The domain to use for the HTTP cookie that stores the authentication token.
-*.falcon.http.authentication.cookie.domain=
-
-# Indicates if anonymous requests are allowed when using 'simple' authentication.
-*.falcon.http.authentication.simple.anonymous.allowed=false
-
-# Indicates the Kerberos principal to be used for HTTP endpoint.
-# The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
-*.falcon.http.authentication.kerberos.principal=
-
-# Location of the keytab file with the credentials for the HTTP principal.
-*.falcon.http.authentication.kerberos.keytab=
-
-# The kerberos name rules are used to resolve kerberos principal names; refer to Hadoop's KerberosName for more details.
-*.falcon.http.authentication.kerberos.name.rules=DEFAULT
-
-# Comma separated list of blacklisted users
-*.falcon.http.authentication.blacklisted.users=
-
-######### Authentication Properties #########
-
-
-######### Authorization Properties #########
-
-# Authorization Enabled flag: false (default)|true
-*.falcon.security.authorization.enabled=false
-
-# The name of the group of super-users
-*.falcon.security.authorization.superusergroup=falcon
-
-# Admin Users, comma separated users
-*.falcon.security.authorization.admin.users=falcon,ambari-qa
-
-# Admin Group Membership, comma separated groups
-*.falcon.security.authorization.admin.groups=falcon,staff
-
-# Authorization Provider Implementation Fully Qualified Class Name
-*.falcon.security.authorization.provider=org.apache.falcon.security.DefaultAuthorizationProvider
-
-######### Authorization Properties #########
-
-######### ADF Configurations start #########
-
-# A String object that represents the namespace
-*.microsoft.windowsazure.services.servicebus.namespace=
-
-# Request and status queues on the namespace
-*.microsoft.windowsazure.services.servicebus.requestqueuename=
-*.microsoft.windowsazure.services.servicebus.statusqueuename=
-
-# A String object that contains the SAS key name
-*.microsoft.windowsazure.services.servicebus.sasKeyName=
-
-# A String object that contains the SAS key
-*.microsoft.windowsazure.services.servicebus.sasKey=
-
-# A String object containing the base URI that is added to your Service Bus namespace to form the URI to connect
-# to the Service Bus service. To access the default public Azure service, pass ".servicebus.windows.net"
-*.microsoft.windowsazure.services.servicebus.serviceBusRootUri=
-
-# Service bus polling frequency
-*.microsoft.windowsazure.services.servicebus.polling.frequency=
-
-# Super user
-*.microsoft.windowsazure.services.servicebus.superuser=
-
-######### ADF Configurations end ###########
-
-######### SMTP Properties ########
-
-# Setting SMTP hostname
-#*.falcon.email.smtp.host=localhost
-
-# Setting SMTP port number
-#*.falcon.email.smtp.port=25
-
-# Setting email from address
-#*.falcon.email.from.address=falcon@localhost
-
-# Setting email Auth
-#*.falcon.email.smtp.auth=false
-
-#Setting user name
-#*.falcon.email.smtp.user=""
-
-#Setting password
-#*.falcon.email.smtp.password=""
-
-# Setting monitoring plugin, if SMTP parameters are defined
-#*.monitoring.plugins=org.apache.falcon.plugin.DefaultMonitoringPlugin,\
-#                     org.apache.falcon.plugin.EmailNotificationPlugin
-
-######### StateStore Properties #####
-#*.falcon.state.store.impl=org.apache.falcon.state.store.jdbc.JDBCStateStore
-#*.falcon.statestore.jdbc.driver=org.apache.derby.jdbc.EmbeddedDriver
-#*.falcon.statestore.jdbc.url=jdbc:derby:data/statestore.db;create=true
-#*.falcon.statestore.jdbc.username=sa
-#*.falcon.statestore.jdbc.password=
-#*.falcon.statestore.connection.data.source=org.apache.commons.dbcp.BasicDataSource
-## Maximum number of active connections that can be allocated from this pool at the same time.
-#*.falcon.statestore.pool.max.active.conn=10
-#*.falcon.statestore.connection.properties=
-## Indicates the interval (in milliseconds) between eviction runs.
-#*.falcon.statestore.validate.db.connection.eviction.interval=300000
-## The number of objects to examine during each run of the idle object evictor thread.
-#*.falcon.statestore.validate.db.connection.eviction.num=10
-## Creates Falcon DB.
-## If set to true, it creates the DB schema if it does not exist. If the DB schema exists, this is a NOP.
-## If set to false, it does not create the DB schema. If the DB schema does not exist, startup fails.
-#*.falcon.statestore.create.db.schema=true
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/resources/statestore.credentials
----------------------------------------------------------------------
diff --git a/common/src/main/resources/statestore.credentials b/common/src/main/resources/statestore.credentials
deleted file mode 100644
index 86c32a1..0000000
--- a/common/src/main/resources/statestore.credentials
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-######### StateStore Credentials #####
-#*.falcon.statestore.jdbc.username=sa
-#*.falcon.statestore.jdbc.password=
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/resources/statestore.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/statestore.properties b/common/src/main/resources/statestore.properties
deleted file mode 100644
index 44e79b3..0000000
--- a/common/src/main/resources/statestore.properties
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-*.domain=debug
-
-######### StateStore Properties #####
-#*.falcon.state.store.impl=org.apache.falcon.state.store.jdbc.JDBCStateStore
-#*.falcon.statestore.jdbc.driver=org.apache.derby.jdbc.EmbeddedDriver
-## Falcon currently supports derby, mysql and postgreSQL, change url based on DB.
-#*.falcon.statestore.jdbc.url=jdbc:derby:data/falcon.db;create=true
-
-## StateStore credentials file where username, password and other properties can be stored securely.
-## Set this credentials file's permissions to 400 and make sure only the user who starts falcon has read permission.
-## Give the absolute path to the credentials file along with the file name, or put it in the classpath with the filename statestore.credentials.
-## The credentials file should be present either in the given location or on the classpath, otherwise falcon won't start.
-#*.falcon.statestore.credentials.file=
-
-#*.falcon.statestore.connection.data.source=org.apache.commons.dbcp.BasicDataSource
-## Maximum number of active connections that can be allocated from this pool at the same time.
-#*.falcon.statestore.pool.max.active.conn=10
-## Any additional connection properties that need to be used, specified as comma separated key=value pairs.
-#*.falcon.statestore.connection.properties=
-## Indicates the interval (in milliseconds) between eviction runs.
-#*.falcon.statestore.validate.db.connection.eviction.interval=300000
-## The number of objects to examine during each run of the idle object evictor thread.
-#*.falcon.statestore.validate.db.connection.eviction.num=10
-## Creates Falcon DB.
-## If set to true, it creates the DB schema if it does not exist. If the DB schema exists, this is a NOP.
-## If set to false, it does not create the DB schema. If the DB schema does not exist, startup fails.
-#*.falcon.statestore.create.db.schema=true
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/cleanup/LogCleanupServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/cleanup/LogCleanupServiceTest.java b/common/src/test/java/org/apache/falcon/cleanup/LogCleanupServiceTest.java
deleted file mode 100644
index 0df59b2..0000000
--- a/common/src/test/java/org/apache/falcon/cleanup/LogCleanupServiceTest.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.cleanup;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * Test for log cleanup service.
- */
-public class LogCleanupServiceTest extends AbstractTestBase {
-
-    private FileSystem fs;
-    private FileSystem tfs;
-    private EmbeddedCluster targetDfsCluster;
-
-    private final Path instanceLogPath = new Path("/projects/falcon/staging/falcon/workflows/process/"
-        + "sample" + "/logs/job-2010-01-01-01-00/000");
-    private final Path instanceLogPath1 = new Path("/projects/falcon/staging/falcon/workflows/process/"
-        + "sample" + "/logs/job-2010-01-01-01-00/001");
-    private final Path instanceLogPath2 = new Path("/projects/falcon/staging/falcon/workflows/process/"
-        + "sample" + "/logs/job-2010-01-01-02-00/001");
-    private final Path instanceLogPath3 = new Path("/projects/falcon/staging/falcon/workflows/process/"
-        + "sample2" + "/logs/job-2010-01-01-01-00/000");
-    private final Path instanceLogPath4 = new Path("/projects/falcon/staging/falcon/workflows/process/"
-        + "sample" + "/logs/latedata/2010-01-01-01-00");
-    private final Path instanceLogPath5 = new Path("/projects/falcon/staging/falcon/workflows/process/"
-            + "sample3" + "/logs/job-2010-01-01-01-00/000");
-    private final Path feedInstanceLogPath = new Path("/projects/falcon/staging/falcon/workflows/feed/"
-        + "impressionFeed" + "/logs/job-2010-01-01-01-00/testCluster/000");
-    private final Path feedInstanceLogPath1 = new Path("/projects/falcon/staging/falcon/workflows/feed/"
-        + "impressionFeed2" + "/logs/job-2010-01-01-01-00/testCluster/000");
-
-
-    @AfterClass
-    public void tearDown() {
-        this.dfsCluster.shutdown();
-        this.targetDfsCluster.shutdown();
-    }
-
-    @Override
-    @BeforeClass
-    public void setup() throws Exception {
-        this.dfsCluster = EmbeddedCluster.newCluster("testCluster", CurrentUser.getUser());
-        conf = dfsCluster.getConf();
-        fs = dfsCluster.getFileSystem();
-        fs.delete(new Path("/"), true);
-
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        System.setProperty("test.build.data", "target/tdfs/data" + System.currentTimeMillis());
-        this.targetDfsCluster = EmbeddedCluster.newCluster("backupCluster");
-        conf = targetDfsCluster.getConf();
-
-        storeEntity(EntityType.CLUSTER, "backupCluster");
-        storeEntity(EntityType.FEED, "impressionFeed");
-        storeEntity(EntityType.FEED, "clicksFeed");
-        storeEntity(EntityType.FEED, "imp-click-join1");
-        storeEntity(EntityType.FEED, "imp-click-join2");
-        storeEntity(EntityType.PROCESS, "sample");
-        Process process = ConfigurationStore.get().get(EntityType.PROCESS, "sample");
-        Process otherProcess = (Process) process.copy();
-        otherProcess.setName("sample2");
-        otherProcess.setFrequency(new Frequency("days(1)"));
-        Process noACLProcess = (Process) process.copy();
-        noACLProcess.setName("sample3");
-        noACLProcess.setACL(null);
-        ConfigurationStore.get().remove(EntityType.PROCESS,
-                otherProcess.getName());
-        ConfigurationStore.get().publish(EntityType.PROCESS, otherProcess);
-        ConfigurationStore.get().remove(EntityType.PROCESS,
-                noACLProcess.getName());
-        ConfigurationStore.get().publish(EntityType.PROCESS, noACLProcess);
-
-        fs.mkdirs(instanceLogPath);
-        fs.mkdirs(instanceLogPath1);
-        fs.mkdirs(instanceLogPath2);
-        fs.mkdirs(instanceLogPath3);
-        fs.mkdirs(instanceLogPath4);
-        fs.mkdirs(instanceLogPath5);
-
-        // fs.setTimes won't work on dirs
-        fs.createNewFile(new Path(instanceLogPath, "oozie.log"));
-        fs.createNewFile(new Path(instanceLogPath, "pigAction_SUCCEEDED.log"));
-
-        tfs = targetDfsCluster.getFileSystem();
-        tfs.delete(new Path("/"), true);
-        fs.mkdirs(feedInstanceLogPath);
-        fs.mkdirs(feedInstanceLogPath1);
-        tfs.mkdirs(feedInstanceLogPath);
-        tfs.mkdirs(feedInstanceLogPath1);
-        fs.createNewFile(new Path(feedInstanceLogPath, "oozie.log"));
-        tfs.createNewFile(new Path(feedInstanceLogPath, "oozie.log"));
-
-        // table feed staging dir setup
-        initializeStagingDirs();
-        Thread.sleep(1000);
-    }
-
-    private void initializeStagingDirs() throws Exception {
-        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
-        Feed tableFeed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(inputStream);
-        getStore().publish(EntityType.FEED, tableFeed);
-    }
-
-    @Test
-    public void testProcessLogs() throws IOException, FalconException, InterruptedException {
-
-        Assert.assertTrue(fs.exists(instanceLogPath));
-        Assert.assertTrue(fs.exists(instanceLogPath1));
-        Assert.assertTrue(fs.exists(instanceLogPath2));
-        Assert.assertTrue(fs.exists(instanceLogPath3));
-
-        AbstractCleanupHandler processCleanupHandler = new ProcessCleanupHandler();
-        processCleanupHandler.cleanup();
-
-        Assert.assertFalse(fs.exists(instanceLogPath));
-        Assert.assertFalse(fs.exists(instanceLogPath1));
-        Assert.assertFalse(fs.exists(instanceLogPath2));
-        Assert.assertFalse(fs.exists(instanceLogPath5));
-        Assert.assertTrue(fs.exists(instanceLogPath3));
-    }
-
-    @Test
-    public void testFeedLogs() throws IOException, FalconException, InterruptedException {
-
-        Assert.assertTrue(fs.exists(feedInstanceLogPath));
-        Assert.assertTrue(tfs.exists(feedInstanceLogPath));
-        Assert.assertTrue(fs.exists(feedInstanceLogPath1));
-        Assert.assertTrue(tfs.exists(feedInstanceLogPath1));
-
-        AbstractCleanupHandler feedCleanupHandler = new FeedCleanupHandler();
-        feedCleanupHandler.cleanup();
-
-        Assert.assertFalse(fs.exists(feedInstanceLogPath));
-        Assert.assertFalse(tfs.exists(feedInstanceLogPath));
-        Assert.assertTrue(fs.exists(feedInstanceLogPath1));
-        Assert.assertTrue(tfs.exists(feedInstanceLogPath1));
-    }
-}


[18/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/README.md
----------------------------------------------------------------------
diff --git a/falcon-regression/README.md b/falcon-regression/README.md
deleted file mode 100644
index acb7175..0000000
--- a/falcon-regression/README.md
+++ /dev/null
@@ -1,291 +0,0 @@
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-Falcon Regression
-=================
-This project has two modules:
-
-1. merlin: it has all the system tests for falcon
-2. merlin-core: it has all the utils used by merlin
-
-Requirements
-------------
-In addition to falcon server and prism, running full falcon regression requires three clusters.
-Each of these clusters must have:
-
-- hadoop
-- oozie
-- hive
-- hcat
-For specific tests it may be possible to run them without all clusters and components.
-
-Prior to running tests Merlin.properties needs to be created and populated with cluster details.
-
-Configuring Merlin.properties
------------------------------
-Merlin.properties must be created before running falcon regression tests.
-The file must be created at the location:
-
-    falcon/falcon-regression/merlin/src/main/resources/Merlin.properties
-
-Populate it with prism related properties:
-
-    #prism properties
-    prism.oozie_url = http://node-1.example.com:11000/oozie/
-    prism.oozie_location = /usr/lib/oozie/bin
-    prism.qa_host = node-1.example.com
-    prism.service_user = falcon
-    prism.hadoop_url = node-1.example.com:8020
-    prism.hadoop_location = /usr/lib/hadoop/bin/hadoop
-    prism.hostname = http://node-1.example.com:15000
-    prism.storeLocation = hdfs://node-1.example.com:8020/apps/falcon
-
-Specify the clusters that you would be using for testing:
-
-    servers = cluster1,cluster2,cluster3
-
-For each cluster specify properties:
-
-    #cluster1 properties
-    cluster1.oozie_url = http://node-1.example.com:11000/oozie/
-    cluster1.oozie_location = /usr/lib/oozie/bin
-    cluster1.qa_host = node-1.example.com
-    cluster1.service_user = falcon
-    cluster1.password = rgautam
-    cluster1.hadoop_url = node-1.example.com:8020
-    cluster1.hadoop_location = /usr/lib/hadoop/bin/hadoop
-    cluster1.hostname = http://node-1.example.com:15000
-    cluster1.cluster_readonly = webhdfs://node-1.example.com:50070
-    cluster1.cluster_execute = node-1.example.com:8032
-    cluster1.cluster_write = hdfs://node-1.example.com:8020
-    cluster1.activemq_url = tcp://node-1.example.com:61616?daemon=true
-    cluster1.storeLocation = hdfs://node-1.example.com:8020/apps/falcon
-    cluster1.colo = default
-    cluster1.namenode.kerberos.principal = nn/node-1.example.com@none
-    cluster1.hive.metastore.kerberos.principal = hive/node-1.example.com@none
-    cluster1.hcat_endpoint = thrift://node-1.example.com:9083
-    cluster1.service_stop_cmd = /usr/lib/falcon/bin/falcon-stop
-    cluster1.service_start_cmd = /usr/lib/falcon/bin/falcon-start
-
-To avoid cleaning the root tests dir before every test:
-
-    clean_tests_dir=false
-
-Setting up HDFS Dirs
---------------------
-On all clusters, as the user that started the falcon server, do:
-
-    hdfs dfs -mkdir -p  /tmp/falcon-regression-staging
-    hdfs dfs -chmod 777 /tmp/falcon-regression-staging
-    hdfs dfs -mkdir -p  /tmp/falcon-regression-working
-    hdfs dfs -chmod 755 /tmp/falcon-regression-working
-
-Running Tests
--------------
-After creating the Merlin.properties file, you can run the following commands to run the tests.
-
-    cd falcon-regression
-    mvn clean test -Phadoop-2
-
-Profiles Supported: hadoop-2
-
-To run a specific test:
-
-    mvn clean test -Phadoop-2 -Dtest=EmbeddedPigScriptTest
-
-If you want to use a specific version of any component, it can be specified using -D, for example:
-
-    mvn clean test -Phadoop-2 -Doozie.version=4.1.0 -Dhadoop.version=2.6.0
-
-Security Tests:
----------------
-ACL tests require multiple user account setup:
-
-    other.user.name=root
-    falcon.super.user.name=falcon
-    falcon.super2.user.name=falcon2
-
-ACL tests also require group name of the current user:
-
-    current_user.group.name=users
-
-For testing with kerberos set keytabs properties for different users:
-
-    current_user_keytab=/home/qa/hadoopqa/keytabs/qa.headless.keytab
-    falcon.super.user.keytab=/home/qa/hadoopqa/keytabs/falcon.headless.keytab
-    falcon.super2.user.keytab=/home/qa/hadoopqa/keytabs/falcon2.headless.keytab
-    other.user.keytab=/home/qa/hadoopqa/keytabs/root.headless.keytab
-
-Adding tests to falcon regression:
-----------------------------------
-If you wish to contribute to falcon regression, it's as easy as it gets.
-All test classes must be added to the directory:
-
-    falcon/falcon-regression/merlin/src/test/java
-
-This directory contains subdirectories such as prism, ui, security, etc.,
-which contain tests specific to these aspects of falcon. Any general test
-can be added directly to the parent directory above. If you wish to write
-a series of tests for a new feature, feel free to create a new subdirectory.
-Your test can use the various process/feed/cluster/workflow templates present in:
-
-    falcon/falcon-regression/merlin/src/test/resources
-
-or you can add your own bundle of XMLs in this directory. Please avoid redundancy of any resource.
-
-Each test class can contain multiple related tests. Let us look at a sample test class.
-*Refer to comments in the code for guidance*:
-
-```java
-    //The License note must be added to each test
-
-    /**
-     * Licensed to the Apache Software Foundation (ASF) under one
-     * or more contributor license agreements.  See the NOTICE file
-     * distributed with this work for additional information
-     * regarding copyright ownership.  The ASF licenses this file
-     * to you under the Apache License, Version 2.0 (the
-     * "License"); you may not use this file except in compliance
-     * with the License.  You may obtain a copy of the License at
-     *
-     *     http://www.apache.org/licenses/LICENSE-2.0
-     *
-     * Unless required by applicable law or agreed to in writing, software
-     * distributed under the License is distributed on an "AS IS" BASIS,
-     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     * See the License for the specific language governing permissions and
-     * limitations under the License.
-     */
-
-    package org.apache.falcon.regression;
-
-
-    import org.apache.falcon.regression.core.bundle.Bundle;
-    import org.apache.falcon.regression.core.helpers.ColoHelper;
-    import org.apache.falcon.regression.core.response.ServiceResponse;
-    import org.apache.falcon.regression.core.util.AssertUtil;
-    import org.apache.falcon.regression.core.util.BundleUtil;
-    import org.apache.falcon.regression.testHelper.BaseTestClass;
-    import org.testng.annotations.AfterMethod;
-    import org.testng.annotations.BeforeMethod;
-    import org.testng.annotations.Test;
-
-    @Test(groups = "embedded")
-
-    //Every test class must inherit BaseTestClass. This class
-    //helps the test use the properties mentioned in Merlin.properties.
-
-    public class FeedSubmitTest extends BaseTestClass {
-
-        private ColoHelper cluster = servers.get(0);
-        private String feed;
-
-        @BeforeMethod(alwaysRun = true)
-        public void setUp() throws Exception {
-
-        //Several Util classes are available, such as BundleUtil, which, for example,
-        //has been used here to read the ELBundle present in falcon/falcon-regression/src/test/resources
-
-            bundles[0] = BundleUtil.readELBundle();
-            bundles[0].generateUniqueBundle();
-            bundles[0] = new Bundle(bundles[0], cluster);
-
-            //submit the cluster
-            ServiceResponse response =
-                prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0));
-            AssertUtil.assertSucceeded(response);
-            feed = bundles[0].getInputFeedFromBundle();
-        }
-
-        @AfterMethod(alwaysRun = true)
-        public void tearDown() {
-            removeBundles();
-        }
-
-        //Java docs must be added for each test function, explaining what the function does
-
-        /**
-         * Submit correctly adjusted feed. Response should reflect success.
-         *
-         * @throws Exception
-         */
-        @Test(groups = {"singleCluster"})
-        public void submitValidFeed() throws Exception {
-            ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-            AssertUtil.assertSucceeded(response);
-        }
-
-        /**
-         * Submit and remove feed. Try to submit it again. Response should reflect success.
-         *
-         * @throws Exception
-         */
-        @Test(groups = {"singleCluster"})
-        public void submitValidFeedPostDeletion() throws Exception {
-            ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-            AssertUtil.assertSucceeded(response);
-
-            response = prism.getFeedHelper().delete(feed);
-            AssertUtil.assertSucceeded(response);
-            response = prism.getFeedHelper().submitEntity(feed);
-            AssertUtil.assertSucceeded(response);
-        }
-    }
-```
-
-* This class, as the name suggests, tests the feed submission aspect of Falcon.
-It contains multiple test functions, all of which, however, are various test cases for the same
-feature. This organisation of the code must be maintained.
-
-* In order to be able to manipulate feeds, processes and clusters for the various tests,
-objects of the classes FeedMerlin, ProcessMerlin and ClusterMerlin can be used. There are existing
-functions which use these objects, such as setProcessInput, setFeedValidity, setProcessConcurrency,
-setInputFeedPeriodicity etc. in Bundle.java, which should serve your purpose well enough.
-
-* As for utilities, you can use functions in HadoopUtil to create HDFS dirs, delete them,
-and add data on HDFS; OozieUtil to hit Oozie for checking coordinator/workflow status; TimeUtil to
-get lists of dates and directories to aid in data creation; HCatUtil for HCatalog related utilities;
-and many others to make writing tests very easy.
-
-* Coding conventions are strictly followed. Use the checkstyle xml present in
-      falcon/checkstyle/src/main/resources/falcon
-
-  in your project to avoid checkstyle errors.
-
-Testing on Windows
-------------------
-Some tests switch user to run commands as a different user. The location of the binary used to switch users is
-configurable:
-
-    windows.su.binary=ExecuteAs.exe
-
-Automatic capture of oozie logs
--------------------------------
-For full falcon regression runs, it might be desirable to pull all oozie job
-info and logs at the end of the test. This can be done by configuring Merlin.properties:
-
-    log.capture.oozie = true
-    log.capture.oozie.skip_info = false
-    log.capture.oozie.skip_log = true
-    log.capture.location = ../
-
-Dumping entities generated by falcon
-------------------------------------
-Add -Dmerlin.dump.staging to the maven command. For example:
-
-    mvn clean test -Phadoop-2 -Dmerlin.dump.staging=true

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/pom.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/pom.xml b/falcon-regression/merlin-core/pom.xml
deleted file mode 100644
index b1bc609..0000000
--- a/falcon-regression/merlin-core/pom.xml
+++ /dev/null
@@ -1,249 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<!-- pom for DataCommons, initially contains bundle and util files -->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.falcon.regression</groupId>
-        <artifactId>falcon-regression</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-merlin-core</artifactId>
-    <description>merlin-core - utilities for Apache Falcon regression suite</description>
-    <name>Apache Falcon Regression Suite Core</name>
-    <packaging>jar</packaging>
-    <profiles>
-        <profile>
-            <id>hadoop-2</id>
-            <dependencies>
-            	<dependency>
-                	<groupId>org.apache.hadoop</groupId>
-                	<artifactId>hadoop-common</artifactId>
-            	</dependency>
-
-                <dependency>
-                    <groupId>org.apache.hadoop</groupId>
-                    <artifactId>hadoop-auth</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive</groupId>
-                    <artifactId>hive-common</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive.hcatalog</groupId>
-                    <artifactId>hive-webhcat-java-client</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive.hcatalog</groupId>
-                    <artifactId>hive-hcatalog-core</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive</groupId>
-                    <artifactId>hive-jdbc</artifactId>
-                </dependency>
-
-                <dependency>
-                    <groupId>org.apache.hive</groupId>
-                    <artifactId>hive-metastore</artifactId>
-                </dependency>
-            </dependencies>
-        </profile>
-    </profiles>
-    <dependencies>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpcore</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <version>2.6</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.activemq</groupId>
-            <artifactId>activemq-all</artifactId>
-            <version>${activemq.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-pool</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-openwire-legacy</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-camel</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-jaas</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-broker</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-console</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-shiro</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-spring</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-jms-pool</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-amqp</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-mqtt</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-stomp</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.apache.activemq</groupId>
-                    <artifactId>activemq-leveldb-store</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.fusesource.hawtbuf</groupId>
-                    <artifactId>hawtbuf</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-
-
-        <dependency>
-            <groupId>com.jcraft</groupId>
-            <artifactId>jsch</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.google.code.gson</groupId>
-            <artifactId>gson</artifactId>
-            <scope>compile</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>com.googlecode.json-simple</groupId>
-            <artifactId>json-simple</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>xml-apis</groupId>
-            <artifactId>xml-apis</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>xmlunit</groupId>
-            <artifactId>xmlunit</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.xml.bind</groupId>
-            <artifactId>jaxb-api</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.oozie</groupId>
-            <artifactId>oozie-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>joda-time</groupId>
-            <artifactId>joda-time</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.google.code.findbugs</groupId>
-            <artifactId>annotations</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-beanutils</groupId>
-            <artifactId>commons-beanutils</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.jms</groupId>
-            <artifactId>jms</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.falcon</groupId>
-            <artifactId>falcon-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-exec</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.seleniumhq.selenium</groupId>
-            <artifactId>selenium-firefox-driver</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.seleniumhq.selenium</groupId>
-            <artifactId>selenium-support</artifactId>
-        </dependency>
-
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ClusterMerlin.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ClusterMerlin.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ClusterMerlin.java
deleted file mode 100644
index 1d25d12..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ClusterMerlin.java
+++ /dev/null
@@ -1,325 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.Entities;
-
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.ACL;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfaces;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Location;
-import org.apache.falcon.entity.v0.cluster.Locations;
-import org.apache.falcon.entity.v0.cluster.Properties;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-import org.testng.asserts.SoftAssert;
-
-import javax.xml.bind.JAXBException;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/** Class for representing a cluster xml. */
-public class ClusterMerlin extends Cluster {
-    private static final Logger LOGGER = Logger.getLogger(ClusterMerlin.class);
-    public ClusterMerlin(String clusterData) {
-        final Cluster cluster = (Cluster) TestEntityUtil.fromString(EntityType.CLUSTER,
-                clusterData);
-        try {
-            PropertyUtils.copyProperties(this, cluster);
-        } catch (ReflectiveOperationException e) {
-            Assert.fail("Can't create ClusterMerlin: " + ExceptionUtils.getStackTrace(e));
-        }
-    }
-
-    @Override
-    public String toString() {
-        try {
-            StringWriter sw = new StringWriter();
-            EntityType.CLUSTER.getMarshaller().marshal(this, sw);
-            return sw.toString();
-        } catch (JAXBException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /**
-     * Sets unique names for the cluster.
-     * @return mapping of old name to new name
-     * @param prefix prefix of new name
-     */
-    public Map<? extends String, ? extends String> setUniqueName(String prefix) {
-        final String oldName = getName();
-        final String newName = TestEntityUtil.generateUniqueName(prefix, oldName);
-        setName(newName);
-        final HashMap<String, String> nameMap = new HashMap<>(1);
-        nameMap.put(oldName, newName);
-        return nameMap;
-    }
-
-    /**
-     * Set ACL.
-     */
-    public void setACL(String owner, String group, String permission) {
-        ACL acl = new ACL();
-        acl.setOwner(owner);
-        acl.setGroup(group);
-        acl.setPermission(permission);
-        this.setACL(acl);
-    }
-
-    public void setInterface(Interfacetype interfacetype, String endpoint, String version) {
-        final Interfaces interfaces = this.getInterfaces();
-        final List<Interface> interfaceList = interfaces.getInterfaces();
-        for (final Interface anInterface : interfaceList) {
-            if (anInterface.getType() == interfacetype) {
-                anInterface.setEndpoint(endpoint);
-                if (StringUtils.isNotBlank(version)) {
-                    anInterface.setVersion(version);
-                }
-            }
-        }
-    }
-
-    public void setWorkingLocationPath(String path) {
-        for (Location location : getLocations().getLocations()) {
-            if (location.getName() == ClusterLocationType.WORKING) {
-                location.setPath(path);
-                break;
-            }
-        }
-    }
-
-    public String getInterfaceEndpoint(final Interfacetype interfaceType) {
-        String value = null;
-        for (Interface anInterface : getInterfaces().getInterfaces()) {
-            if (anInterface.getType() == interfaceType) {
-                value = anInterface.getEndpoint();
-            }
-        }
-        LOGGER.info("Cluster: " + getName() + " interfaceType: " + interfaceType
-            + " value:" + value);
-        return value;
-    }
-
-    public String getProperty(final String propName) {
-        String value = null;
-        for (Property property : getProperties().getProperties()) {
-            if (property.getName().trim().equals(propName.trim())) {
-                value = property.getValue();
-            }
-        }
-        LOGGER.info("Cluster: " + getName() + " property: " + propName + " value:" + value);
-        return value;
-    }
-
-    public String getLocation(final String locationType) {
-        String value = null;
-        for (Location location : getLocations().getLocations()) {
-            if (location.getName().name().trim().equalsIgnoreCase(locationType.trim().toLowerCase())) {
-                value = location.getPath();
-            }
-        }
-        LOGGER.info("Cluster: " + getName() + " locationType: " + locationType + " value:" + value);
-        return value;
-    }
-
-    /**
-     * Cleans all properties and returns an empty cluster as a draft (as we can't create a cluster from, e.g., an empty string).
-     */
-    public ClusterMerlin getEmptyCluster() {
-        ClusterMerlin clusterMerlin = new ClusterMerlin(this.toString());
-        clusterMerlin.setName("");
-        clusterMerlin.setDescription(null);
-        clusterMerlin.setColo(null);
-        clusterMerlin.setTags(null);
-        clusterMerlin.setInterfaces(new Interfaces());
-        clusterMerlin.setLocations(new Locations());
-        clusterMerlin.getACL().setGroup("");
-        clusterMerlin.getACL().setOwner("");
-        clusterMerlin.setProperties(new Properties());
-        return clusterMerlin;
-    }
-
-    public void addLocation(ClusterLocationType type, String path) {
-        Location newLocation = new Location();
-        newLocation.setName(type);
-        newLocation.setPath(path);
-        getLocations().getLocations().add(newLocation);
-    }
-
-    /**
-     * Add/replace a property.
-     * @param name name of the property
-     * @param value value of the property
-     * @return this
-     */
-    public ClusterMerlin withProperty(String name, String value) {
-        final List<Property> properties = getProperties().getProperties();
-        //if property with same name exists, just replace the value
-        for (Property property : properties) {
-            if (property.getName().equals(name)) {
-                LOGGER.info(String.format("Overwriting property name = %s oldVal = %s newVal = %s",
-                    property.getName(), property.getValue(), value));
-                property.setValue(value);
-                return this;
-            }
-        }
-        //if property is not added already, add it
-        final Property property = new Property();
-        property.setName(name);
-        property.setValue(value);
-        properties.add(property);
-        return this;
-    }
-
-    public void addInterface(Interfacetype type, String endpoint, String version) {
-        Interface iface = new Interface();
-        iface.setType(type);
-        iface.setEndpoint(endpoint);
-        iface.setVersion(version);
-        getInterfaces().getInterfaces().add(iface);
-    }
-
-    public void assertEquals(ClusterMerlin cluster) {
-        LOGGER.info(String.format("Comparing : source: %n%s%n and cluster: %n%n%s",
-            Util.prettyPrintXml(toString()), Util.prettyPrintXml(cluster.toString())));
-        SoftAssert softAssert = new SoftAssert();
-        softAssert.assertEquals(name, cluster.getName(), "Cluster name is different.");
-        softAssert.assertEquals(colo, cluster.getColo(), "Cluster colo is different.");
-        softAssert.assertEquals(description, cluster.getDescription(), "Cluster description is different.");
-        softAssert.assertEquals(tags, cluster.getTags(), "Cluster tags are different.");
-        softAssert.assertTrue(interfacesEqual(interfaces.getInterfaces(), cluster.getInterfaces().getInterfaces()),
-            "Cluster interfaces are different");
-        softAssert.assertTrue(locationsEqual(locations.getLocations(), cluster.getLocations().getLocations()),
-            "Cluster locations are different");
-        softAssert.assertEquals(acl.getGroup(), cluster.getACL().getGroup(), "Cluster acl group is different.");
-        softAssert.assertEquals(acl.getOwner(), cluster.getACL().getOwner(), "Cluster acl owner is different.");
-        softAssert.assertEquals(acl.getPermission(), cluster.getACL().getPermission(),
-            "Cluster acl permission is different.");
-        softAssert.assertTrue(propertiesEqual(properties.getProperties(), cluster.getProperties().getProperties()),
-            "Cluster properties are different.");
-        softAssert.assertAll();
-    }
-
-    private static boolean checkEquality(String str1, String str2, String message){
-        if (!str1.equals(str2)) {
-            LOGGER.info(String.format("Cluster %s are different: %s and %s.", message, str1, str2));
-            return false;
-        }
-        return true;
-    }
-
-    private static boolean interfacesEqual(List<Interface> srcInterfaces, List<Interface> trgInterfaces) {
-        if (srcInterfaces.size() == trgInterfaces.size()) {
-            boolean equality = false;
-            for(Interface iface1: srcInterfaces){
-                for(Interface iface2 : trgInterfaces) {
-                    if (iface2.getType().value().equals(iface1.getType().value())) {
-                        equality = checkEquality(iface1.getEndpoint(), iface2.getEndpoint(),
-                            iface1.getType().value() + " interface endpoints");
-                        equality &= checkEquality(iface1.getVersion(), iface2.getVersion(),
-                            iface1.getType().value() + " interface versions");
-                    }
-                }
-            }
-            return equality;
-        } else {
-            return false;
-        }
-    }
-
-    private static boolean propertiesEqual(List<Property> srcProps, List<Property> trgProps) {
-        if (srcProps.size() == trgProps.size()) {
-            boolean equality = true;
-            for(Property prop1: srcProps){
-                for(Property prop2 : trgProps) {
-                    if (prop2.getName().equals(prop1.getName())) {
-                        equality &= checkEquality(prop1.getValue(), prop2.getValue(),
-                            prop1.getName() + " property values");
-                    }
-                }
-            }
-            return equality;
-        } else {
-            return false;
-        }
-    }
-
-    /**
-     * Compares two lists of locations.
-     */
-    private static boolean locationsEqual(List<Location> srcLocations, List<Location> objLocations) {
-        if (srcLocations.size() != objLocations.size()) {
-            return false;
-        }
-    nextType:
-        for (ClusterLocationType type : ClusterLocationType.values()) {
-            List<Location> locations1 = new ArrayList<>();
-            List<Location> locations2 = new ArrayList<>();
-            //get locations of the same type
-            for (int i = 0; i < srcLocations.size(); i++) {
-                if (srcLocations.get(i).getName() == type) {
-                    locations1.add(srcLocations.get(i));
-                }
-                if (objLocations.get(i).getName() == type) {
-                    locations2.add(objLocations.get(i));
-                }
-            }
-            //compare locations of the same type. At least 1 match should be present.
-            if (locations1.size() != locations2.size()) {
-                return false;
-            }
-            for (Location location1 : locations1) {
-                for (Location location2 : locations2) {
-                    if (location1.getPath().equals(location2.getPath())) {
-                        continue nextType;
-                    }
-                }
-            }
-            return false;
-        }
-        return true;
-    }
-
-    public Location getLocation(ClusterLocationType type) {
-        List<Location> locationsOfType = new ArrayList<>();
-        for(Location location : locations.getLocations()) {
-            if (location.getName() == type) {
-                locationsOfType.add(location);
-            }
-        }
-        Assert.assertEquals(locationsOfType.size(), 1, "Unexpected number of " + type + " locations in: " + this);
-        return locationsOfType.get(0);
-    }
-    @Override
-    public EntityType getEntityType() {
-        return EntityType.CLUSTER;
-    }
-
-}
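
For context, a minimal usage sketch of the ClusterMerlin helper removed above, as such a helper could be driven from a regression test. This is a hypothetical illustration and not part of the commit: the class name ClusterMerlinUsageSketch, the endpoint, and the property values are assumptions; the ClusterMerlin method signatures match the deleted source.

    // Hypothetical usage sketch (not part of this commit); names and values below are illustrative.
    import org.apache.falcon.entity.v0.cluster.Interfacetype;
    import org.apache.falcon.regression.Entities.ClusterMerlin;

    public final class ClusterMerlinUsageSketch {
        private ClusterMerlinUsageSketch() {
        }

        /** Builds a uniquely named copy of the given cluster XML with test-specific settings. */
        public static ClusterMerlin prepareTestCluster(String clusterXml) {
            ClusterMerlin cluster = new ClusterMerlin(clusterXml);      // parse the entity definition
            cluster.setUniqueName("sketch");                            // avoid name clashes between runs
            cluster.setACL("falcon", "users", "0755");                  // owner, group, permission
            cluster.setInterface(Interfacetype.WRITE, "hdfs://namenode:8020", "2.2.0");
            cluster.withProperty("queueName", "default");               // add or replace a property
            return cluster;                                             // toString() marshals back to XML
        }
    }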

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/FeedMerlin.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/FeedMerlin.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/FeedMerlin.java
deleted file mode 100644
index ba1d228..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/FeedMerlin.java
+++ /dev/null
@@ -1,537 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.Entities;
-
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.ACL;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.feed.Retention;
-import org.apache.falcon.entity.v0.feed.Validity;
-import org.apache.falcon.entity.v0.feed.Sla;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.testng.Assert;
-import org.apache.log4j.Logger;
-import org.testng.asserts.SoftAssert;
-
-
-import javax.xml.bind.JAXBException;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/** Class for representing a feed xml. */
-public class FeedMerlin extends Feed {
-    private static final Logger LOGGER = Logger.getLogger(FeedMerlin.class);
-
-    public FeedMerlin(String feedData) {
-        this((Feed) TestEntityUtil.fromString(EntityType.FEED, feedData));
-    }
-
-    public FeedMerlin(final Feed feed) {
-        try {
-            PropertyUtils.copyProperties(this, feed);
-            this.setACL(feed.getACL());
-        } catch (ReflectiveOperationException e) {
-            Assert.fail("Can't create FeedMerlin: " + ExceptionUtils.getStackTrace(e));
-        }
-    }
-
-    public static List<FeedMerlin> fromString(List<String> feedStrings) {
-        List<FeedMerlin> feeds = new ArrayList<>();
-        for (String feedString : feedStrings) {
-            feeds.add(fromString(feedString));
-        }
-        return feeds;
-    }
-
-    public static FeedMerlin fromString(String feedString) {
-        return new FeedMerlin(feedString);
-    }
-
-    public List<String> getClusterNames() {
-        List<String> names = new ArrayList<>();
-        for (Cluster cluster : getClusters().getClusters()) {
-            names.add(cluster.getName());
-        }
-        return names;
-    }
-
-    /**
-     * Add/replace a property.
-     * @param name name of the property
-     * @param value value of the property
-     * @return this
-     */
-    public FeedMerlin withProperty(String name, String value) {
-        final List<Property> properties = getProperties().getProperties();
-        //if property with same name exists, just replace the value
-        for (Property property : properties) {
-            if (property.getName().equals(name)) {
-                LOGGER.info(String.format("Overwriting property name = %s oldVal = %s newVal = %s",
-                    property.getName(), property.getValue(), value));
-                property.setValue(value);
-                return this;
-            }
-        }
-        //if property is not added already, add it
-        final Property property = new Property();
-        property.setName(name);
-        property.setValue(value);
-        properties.add(property);
-        return this;
-    }
-
-    /**
-     * Return feed path of the specified type.
-     * @return feed data path
-     * @param locationType the type of the location
-     */
-    public String getFeedPath(LocationType locationType) {
-        for (Location location : this.getLocations().getLocations()) {
-            if (location.getType() == locationType) {
-                return location.getPath();
-            }
-        }
-        return null;
-    }
-
-    /**
-     * Sets cut-off period.
-     * @param frequency cut-off period
-     */
-    public FeedMerlin insertLateFeedValue(Frequency frequency) {
-        this.getLateArrival().setCutOff(frequency);
-        return this;
-    }
-
-    /**
-     * Sets data location for a feed.
-     * @param pathValue new path
-     */
-    public FeedMerlin setFeedPathValue(String pathValue) {
-        for (Location location : this.getLocations().getLocations()) {
-            if (location.getType() == LocationType.DATA) {
-                location.setPath(pathValue);
-            }
-        }
-        return this;
-    }
-
-    /**
-     * Sets name for a cluster by given order number.
-     * @param clusterName new cluster name
-     * @param clusterIndex index of cluster which should be updated
-     */
-    public FeedMerlin setClusterNameInFeed(String clusterName, int clusterIndex) {
-        this.getClusters().getClusters().get(clusterIndex).setName(clusterName);
-        return this;
-    }
-
-    /** clear clusters of this feed. */
-    public FeedMerlin clearFeedClusters() {
-        getClusters().getClusters().clear();
-        return this;
-    }
-
-    /** add a feed cluster to this feed. */
-    public FeedMerlin addFeedCluster(Cluster cluster) {
-        getClusters().getClusters().add(cluster);
-        return this;
-    }
-
-    /** Fluent builder wrapper for cluster fragment of feed entity. */
-    public static class FeedClusterBuilder {
-        private Cluster cluster = new Cluster();
-
-        public FeedClusterBuilder(String clusterName) {
-            cluster.setName(clusterName);
-        }
-
-        public Cluster build() {
-            Cluster retVal = cluster;
-            cluster = null;
-            return retVal;
-        }
-
-        public FeedClusterBuilder withRetention(String limit, ActionType action) {
-            Retention r = new Retention();
-            r.setLimit(new Frequency(limit));
-            r.setAction(action);
-            cluster.setRetention(r);
-            return this;
-        }
-
-        public FeedClusterBuilder withValidity(String startTime, String endTime) {
-            Validity v = new Validity();
-            v.setStart(TimeUtil.oozieDateToDate(startTime).toDate());
-            v.setEnd(TimeUtil.oozieDateToDate(endTime).toDate());
-            cluster.setValidity(v);
-            return this;
-        }
-
-        public FeedClusterBuilder withClusterType(ClusterType type) {
-            cluster.setType(type);
-            return this;
-        }
-
-        public FeedClusterBuilder withPartition(String partition) {
-            cluster.setPartition(partition);
-            return this;
-        }
-
-        public FeedClusterBuilder withTableUri(String tableUri) {
-            CatalogTable catalogTable = new CatalogTable();
-            catalogTable.setUri(tableUri);
-            cluster.setTable(catalogTable);
-            return this;
-        }
-
-        public FeedClusterBuilder withDataLocation(String dataLocation) {
-            Location oneLocation = new Location();
-            oneLocation.setPath(dataLocation);
-            oneLocation.setType(LocationType.DATA);
-
-            Locations feedLocations = new Locations();
-            feedLocations.getLocations().add(oneLocation);
-            cluster.setLocations(feedLocations);
-            return this;
-        }
-
-        public FeedClusterBuilder withDelay(Frequency frequency) {
-            cluster.setDelay(frequency);
-            return this;
-        }
-
-
-    }
-
-    /**
-     * Sets a number of clusters on the feed definition.
-     *
-     * @param newClusters list of definitions of clusters which are to be set to feed
-     * @param location location of data on every cluster
-     * @param startTime start of feed validity on every cluster
-     * @param endTime end of feed validity on every cluster
-     */
-    public void setFeedClusters(List<String> newClusters, String location, String startTime,
-                                String endTime) {
-        clearFeedClusters();
-        setFrequency(new Frequency("" + 5, Frequency.TimeUnit.minutes));
-
-        for (String newCluster : newClusters) {
-            Cluster feedCluster = new FeedClusterBuilder(new ClusterMerlin(newCluster).getName())
-                .withDataLocation(location + "/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}")
-                .withValidity(TimeUtil.addMinsToTime(startTime, -180),
-                    TimeUtil.addMinsToTime(endTime, 180))
-                .withRetention("hours(20)", ActionType.DELETE)
-                .build();
-            addFeedCluster(feedCluster);
-        }
-    }
-
-    public void setRetentionValue(String retentionValue) {
-        for (org.apache.falcon.entity.v0.feed.Cluster cluster : getClusters().getClusters()) {
-            cluster.getRetention().setLimit(new Frequency(retentionValue));
-        }
-    }
-
-    public void setTableValue(String dBName, String tableName, String pathValue) {
-        getTable().setUri("catalog:" + dBName + ":" + tableName + "#" + pathValue);
-    }
-
-    @Override
-    public String toString() {
-        try {
-            StringWriter sw = new StringWriter();
-            EntityType.FEED.getMarshaller().marshal(this, sw);
-            return sw.toString();
-        } catch (JAXBException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public void setLocation(LocationType locationType, String feedInputPath) {
-        for (Location location : getLocations().getLocations()) {
-            if (location.getType() == locationType) {
-                location.setPath(feedInputPath);
-            }
-        }
-    }
-
-    /**
-     * Sets unique names for the feed.
-     * @return mapping of old name to new name
-     * @param prefix prefix of new name
-     */
-    public Map<? extends String, ? extends String> setUniqueName(String prefix) {
-        final String oldName = getName();
-        final String newName = TestEntityUtil.generateUniqueName(prefix, oldName);
-        setName(newName);
-        final HashMap<String, String> nameMap = new HashMap<>(1);
-        nameMap.put(oldName, newName);
-        return nameMap;
-    }
-
-    public void renameClusters(Map<String, String> clusterNameMap) {
-        for (Cluster cluster : getClusters().getClusters()) {
-            final String oldName = cluster.getName();
-            final String newName = clusterNameMap.get(oldName);
-            if (!StringUtils.isEmpty(newName)) {
-                cluster.setName(newName);
-            }
-        }
-    }
-
-    /**
-     * Set ACL.
-     */
-    public void setACL(String owner, String group, String permission) {
-        ACL acl = new ACL();
-        acl.setOwner(owner);
-        acl.setGroup(group);
-        acl.setPermission(permission);
-        this.setACL(acl);
-    }
-
-    /**
-     * Set SLA.
-     * @param slaLow : low value of SLA
-     * @param slaHigh : high value of SLA
-     */
-
-    public void setSla(Frequency slaLow, Frequency slaHigh) {
-        Sla sla = new Sla();
-        sla.setSlaLow(slaLow);
-        sla.setSlaHigh(slaHigh);
-        this.setSla(sla);
-    }
-
-    /**
-     * Sets new feed data path (for first location).
-     *
-     * @param path new feed data path
-     */
-    public void setFilePath(String path) {
-        getLocations().getLocations().get(0).setPath(path);
-    }
-
-
-    /**
-     * Retrieves prefix (main sub-folders) of first feed data path.
-     */
-    public String getFeedPrefix() {
-        String path = getLocations().getLocations().get(0).getPath();
-        return path.substring(0, path.indexOf('$'));
-    }
-
-    public void setValidity(String feedStart, String feedEnd) {
-        this.getClusters().getClusters().get(0).getValidity()
-            .setStart(TimeUtil.oozieDateToDate(feedStart).toDate());
-        this.getClusters().getClusters().get(0).getValidity()
-            .setEnd(TimeUtil.oozieDateToDate(feedEnd).toDate());
-
-    }
-
-    public void setDataLocationPath(String path) {
-        final List<Location> locations = this.getLocations().getLocations();
-        for (Location location : locations) {
-            if (location.getType() == LocationType.DATA) {
-                location.setPath(path);
-            }
-        }
-    }
-
-    public void setPeriodicity(int frequency, Frequency.TimeUnit periodicity) {
-        Frequency frq = new Frequency(String.valueOf(frequency), periodicity);
-        this.setFrequency(frq);
-    }
-
-    public void setTableUri(String tableUri) {
-        final CatalogTable catalogTable = new CatalogTable();
-        catalogTable.setUri(tableUri);
-        this.setTable(catalogTable);
-    }
-
-    @Override
-    public EntityType getEntityType() {
-        return EntityType.FEED;
-    }
-
-    public void assertGeneralProperties(FeedMerlin newFeed){
-
-        LOGGER.info(String.format("Comparing General Properties: source: %n%s%n and feed: %n%n%s",
-            Util.prettyPrintXml(toString()), Util.prettyPrintXml(newFeed.toString())));
-
-        SoftAssert softAssert = new SoftAssert();
-
-        // Assert all the General Properties
-        softAssert.assertEquals(newFeed.getName(), getName(),
-            "Feed Name is different");
-        softAssert.assertEquals(newFeed.getDescription(), getDescription(),
-            "Feed Description is different");
-        softAssert.assertEquals(newFeed.getTags(), getTags(),
-            "Feed Tags is different");
-        softAssert.assertEquals(newFeed.getGroups(), getGroups(),
-            "Feed Groups is different");
-        softAssert.assertEquals(newFeed.getACL().getOwner(), getACL().getOwner(),
-            "Feed ACL Owner is different");
-        softAssert.assertEquals(newFeed.getACL().getGroup(), getACL().getGroup(),
-            "Feed ACL Group is different");
-        softAssert.assertEquals(newFeed.getACL().getPermission(), getACL().getPermission(),
-            "Feed ACL Permission is different");
-        softAssert.assertEquals(newFeed.getSchema().getLocation(), getSchema().getLocation(),
-            "Feed Schema Location is different");
-        softAssert.assertEquals(newFeed.getSchema().getProvider(), getSchema().getProvider(),
-            "Feed Schema Provider is different");
-        softAssert.assertAll();
-
-    }
-
-    public void assertPropertiesInfo(FeedMerlin newFeed){
-
-        LOGGER.info(String.format("Comparing Properties Info: source: %n%s%n and feed: %n%n%s",
-            Util.prettyPrintXml(toString()), Util.prettyPrintXml(newFeed.toString())));
-
-        SoftAssert softAssert = new SoftAssert();
-
-        // Assert all the Properties Info
-        softAssert.assertEquals(newFeed.getFrequency().getFrequency(),
-            getFrequency().getFrequency(),
-            "Feed Frequency is different");
-        softAssert.assertEquals(newFeed.getFrequency().getTimeUnit().toString(),
-            getFrequency().getTimeUnit().toString(),
-            "Feed Frequency Unit is different");
-        softAssert.assertEquals(newFeed.getLateArrival().getCutOff().getFrequencyAsInt(),
-            getLateArrival().getCutOff().getFrequencyAsInt(),
-            "Feed CutOff is different");
-        softAssert.assertEquals(newFeed.getLateArrival().getCutOff().getTimeUnit(),
-            getLateArrival().getCutOff().getTimeUnit(),
-            "Feed CutOff Unit is different");
-        softAssert.assertEquals(newFeed.getAvailabilityFlag(),
-            getAvailabilityFlag(),
-            "Feed Availability Flag is different");
-        softAssert.assertEquals(newFeed.getProperties().getProperties().get(0).getName(),
-            getProperties().getProperties().get(0).getName(),
-            "Feed Property1 Name is different");
-        softAssert.assertEquals(newFeed.getProperties().getProperties().get(0).getValue(),
-            getProperties().getProperties().get(0).getValue(),
-            "Feed Property1 Value is different");
-        softAssert.assertEquals(newFeed.getProperties().getProperties().get(1).getName(),
-            getProperties().getProperties().get(1).getName(),
-            "Feed Property2 Name is different");
-        softAssert.assertEquals(newFeed.getProperties().getProperties().get(1).getValue(),
-            getProperties().getProperties().get(1).getValue(),
-            "Feed Property2 Value is different");
-
-        softAssert.assertAll();
-    }
-
-    public void assertLocationInfo(FeedMerlin newFeed){
-
-        LOGGER.info(String.format("Comparing Location Info: source: %n%s%n and feed: %n%n%s",
-            Util.prettyPrintXml(toString()), Util.prettyPrintXml(newFeed.toString())));
-
-        SoftAssert softAssert = new SoftAssert();
-
-        // Assert all the Location Properties
-        softAssert.assertEquals(newFeed.getLocations().getLocations().get(0).getPath(),
-            getLocations().getLocations().get(0).getPath(),
-            "Feed Location Data Path is different");
-        softAssert.assertEquals(newFeed.getLocations().getLocations().get(1).getPath(),
-            getLocations().getLocations().get(1).getPath(),
-            "Feed Location Stats Path is different");
-        softAssert.assertEquals(newFeed.getLocations().getLocations().get(2).getPath(),
-            getLocations().getLocations().get(2).getPath(),
-            "Feed Location Meta Path is different");
-
-        softAssert.assertAll();
-
-    }
-
-    public void assertClusterInfo(FeedMerlin newFeed){
-
-        LOGGER.info(String.format("Comparing Feed Cluster Info: source: %n%s%n and feed: %n%n%s",
-            Util.prettyPrintXml(toString()), Util.prettyPrintXml(newFeed.toString())));
-
-        SoftAssert softAssert = new SoftAssert();
-
-        // Assert all the Cluster Properties
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getName(),
-            getClusters().getClusters().get(0).getName(),
-            "Feed Cluster Name is different");
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getLocations().getLocations().get(0).getPath(),
-            getLocations().getLocations().get(0).getPath(),
-            "Feed Cluster Data Path is different");
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getLocations().getLocations().get(1).getPath(),
-            getLocations().getLocations().get(1).getPath(),
-            "Feed Cluster Stats Path is different");
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getLocations().getLocations().get(2).getPath(),
-            getLocations().getLocations().get(2).getPath(),
-            "Feed Cluster Meta Path is different");
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getValidity().getStart(),
-            getClusters().getClusters().get(0).getValidity().getStart(),
-            "Feed Cluster Start Date is different");
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getValidity().getEnd(),
-            getClusters().getClusters().get(0).getValidity().getEnd(),
-            "Feed Cluster End Date is different");
-        // Asserting on hardcoded value of 99, due to a UI bug which only supports up to two digits.
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getRetention().getLimit().getFrequency(), "99",
-            "Feed Retention is different");
-        softAssert.assertEquals(newFeed.getClusters().getClusters().get(0)
-                .getRetention().getLimit().getTimeUnit().name(),
-            getClusters().getClusters().get(0).getRetention().getLimit().getTimeUnit().name(),
-            "Feed Retention Unit is different");
-
-        softAssert.assertAll();
-
-    }
-
-    public void assertEquals(FeedMerlin newFeed) {
-
-        assertGeneralProperties(newFeed);
-        assertPropertiesInfo(newFeed);
-        assertLocationInfo(newFeed);
-        assertClusterInfo(newFeed);
-    }
-
-
-
-}
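
Similarly, a minimal sketch of the fluent FeedClusterBuilder from the FeedMerlin class deleted above, rebuilding a feed's cluster list for a test. Again a hypothetical illustration, not part of the commit: the class name FeedMerlinUsageSketch, the data path, and the retention value are assumptions; the builder methods match the deleted source.

    // Hypothetical usage sketch (not part of this commit); names and values below are illustrative.
    import org.apache.falcon.entity.v0.feed.ActionType;
    import org.apache.falcon.entity.v0.feed.ClusterType;
    import org.apache.falcon.regression.Entities.FeedMerlin;

    public final class FeedMerlinUsageSketch {
        private FeedMerlinUsageSketch() {
        }

        /** Replaces the feed's clusters with a single source cluster covering the given validity window. */
        public static FeedMerlin prepareTestFeed(String feedXml, String clusterName,
                                                 String startTime, String endTime) {
            FeedMerlin feed = FeedMerlin.fromString(feedXml);
            feed.clearFeedClusters();                                   // drop clusters from the template
            feed.addFeedCluster(new FeedMerlin.FeedClusterBuilder(clusterName)
                .withClusterType(ClusterType.SOURCE)
                .withDataLocation("/data/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}")
                .withValidity(startTime, endTime)                       // oozie-format timestamps, e.g. 2016-03-01T00:00Z
                .withRetention("hours(20)", ActionType.DELETE)
                .build());
            return feed;
        }
    }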

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ProcessMerlin.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ProcessMerlin.java b/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ProcessMerlin.java
deleted file mode 100644
index fb76da1..0000000
--- a/falcon-regression/merlin-core/src/main/java/org/apache/falcon/regression/Entities/ProcessMerlin.java
+++ /dev/null
@@ -1,691 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression.Entities;
-
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.process.ACL;
-import org.apache.falcon.entity.v0.process.Cluster;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.entity.v0.process.Properties;
-import org.apache.falcon.entity.v0.process.Property;
-import org.apache.falcon.entity.v0.process.Sla;
-import org.apache.falcon.entity.v0.process.Validity;
-import org.apache.falcon.entity.v0.process.Workflow;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-import org.testng.asserts.SoftAssert;
-
-import javax.xml.bind.JAXBException;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-/** Class for representing a process xml. */
-public class ProcessMerlin extends Process {
-    private static final Logger LOGGER = Logger.getLogger(ProcessMerlin.class);
-    public ProcessMerlin(String processData) {
-        this((Process) TestEntityUtil.fromString(EntityType.PROCESS, processData));
-    }
-
-    public ProcessMerlin(final Process process) {
-        try {
-            PropertyUtils.copyProperties(this, process);
-        } catch (ReflectiveOperationException e) {
-            Assert.fail("Can't create ProcessMerlin: " + ExceptionUtils.getStackTrace(e));
-        }
-    }
-
-    public ProcessMerlin clearProcessCluster() {
-        getClusters().getClusters().clear();
-        return this;
-    }
-
-    public ProcessMerlin addProcessCluster(Cluster cluster) {
-        getClusters().getClusters().add(cluster);
-        return this;
-    }
-
-    public List<String> getClusterNames() {
-        List<String> names = new ArrayList<>();
-        for (Cluster cluster : getClusters().getClusters()) {
-            names.add(cluster.getName());
-        }
-        return names;
-    }
-
-    public Cluster getClusterByName(String name) {
-        for (Cluster cluster : getClusters().getClusters()) {
-            if (name.equals(cluster.getName())) {
-                return cluster;
-            }
-        }
-        return null;
-    }
-
-    /**
-     * Compares two process cluster lists for equality.
-     */
-    public static void assertClustersEqual(List<Cluster> clusters1, List<Cluster> clusters2) {
-        if (clusters1.size() != clusters2.size()) {
-            Assert.fail("Cluster sizes are different.");
-        }
-        Comparator<Cluster> clusterComparator = new Comparator<Cluster>() {
-            @Override
-            public int compare(Cluster cluster1, Cluster cluster2) {
-                return cluster1.getName().compareTo(cluster2.getName());
-            }
-        };
-        Collections.sort(clusters1, clusterComparator);
-        Collections.sort(clusters2, clusterComparator);
-        SoftAssert softAssert = new SoftAssert();
-        for(int i = 0; i < clusters1.size(); i++) {
-            Cluster cluster1 = clusters1.get(i);
-            Cluster cluster2 = clusters2.get(i);
-            softAssert.assertEquals(cluster1.getName(), cluster2.getName(), "Cluster names are different.");
-            softAssert.assertEquals(cluster1.getValidity().getStart(), cluster2.getValidity().getStart(),
-                String.format("Validity start is not the same for cluster %s", cluster1.getName()));
-            softAssert.assertEquals(cluster1.getValidity().getEnd(), cluster2.getValidity().getEnd(),
-                String.format("Cluster validity end is not the same for cluster %s", cluster1.getName()));
-        }
-        softAssert.assertAll();
-    }
-
-    public Input getInputByName(String name) {
-        for (Input input : getInputs().getInputs()) {
-            if (input.getName().equals(name)) {
-                return input;
-            }
-        }
-        return null;
-    }
-
-    public Output getOutputByName(String name) {
-        for (Output output : getOutputs().getOutputs()) {
-            if (output.getName().equals(name)) {
-                return output;
-            }
-        }
-        return null;
-    }
-
-    /** Fluent builder wrapper for cluster fragment of process entity. */
-    public static class ProcessClusterBuilder {
-        private Cluster cluster = new Cluster();
-
-        public ProcessClusterBuilder(String clusterName) {
-            cluster.setName(clusterName);
-        }
-
-        public Cluster build() {
-            Cluster retVal = cluster;
-            cluster = null;
-            return retVal;
-        }
-
-        public ProcessClusterBuilder withValidity(String startTime, String endTime) {
-            Validity v = new Validity();
-            v.setStart(TimeUtil.oozieDateToDate(startTime).toDate());
-            v.setEnd(TimeUtil.oozieDateToDate(endTime).toDate());
-            cluster.setValidity(v);
-            return this;
-        }
-
-    }
-
-    /**
-     * Sets a number of clusters on the process definition.
-     *
-     * @param newClusters list of definitions of clusters which are to be set to process
-     *                    (clusters on which process should run)
-     * @param startTime start of process validity on every cluster
-     * @param endTime end of process validity on every cluster
-     */
-    public void setProcessClusters(List<String> newClusters, String startTime, String endTime) {
-        clearProcessCluster();
-        for (String newCluster : newClusters) {
-            final Cluster processCluster = new ProcessClusterBuilder(
-                new ClusterMerlin(newCluster).getName())
-                .withValidity(startTime, endTime)
-                .build();
-            addProcessCluster(processCluster);
-        }
-    }
-
-    public final ProcessMerlin clearProperties() {
-        final Properties properties = new Properties();
-        setProperties(properties);
-        return this;
-    }
-
-    /**
-     * Add/replace a property.
-     * @param name name of the property
-     * @param value value of the property
-     * @return this
-     */
-    public final ProcessMerlin withProperty(String name, String value) {
-        final List<Property> properties = getProperties().getProperties();
-        //if property with same name exists, just replace the value
-        for (Property property : properties) {
-            if (property.getName().equals(name)) {
-                LOGGER.info(String.format("Overwriting property name = %s oldVal = %s newVal = %s",
-                    property.getName(), property.getValue(), value));
-                property.setValue(value);
-                return this;
-            }
-        }
-        //if property is not added already, add it
-        final Property property = new Property();
-        property.setName(name);
-        property.setValue(value);
-        properties.add(property);
-        return this;
-    }
-
-    public String getProperty(String name) {
-        for (Property property : properties.getProperties()) {
-            if (property.getName().equals(name)) {
-                return property.getValue();
-            }
-        }
-        return null;
-    }
-
-    @Override
-    public String toString() {
-        try {
-            StringWriter sw = new StringWriter();
-            EntityType.PROCESS.getMarshaller().marshal(this, sw);
-            return sw.toString();
-        } catch (JAXBException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public void renameClusters(Map<String, String> clusterNameMap) {
-        for (Cluster cluster : getClusters().getClusters()) {
-            final String oldName = cluster.getName();
-            final String newName = clusterNameMap.get(oldName);
-            if (!StringUtils.isEmpty(newName)) {
-                cluster.setName(newName);
-            }
-        }
-    }
-
-    public void renameFeeds(Map<String, String> feedNameMap) {
-        for(Input input : getInputs().getInputs()) {
-            final String oldName = input.getFeed();
-            final String newName = feedNameMap.get(oldName);
-            if (!StringUtils.isEmpty(newName)) {
-                input.setFeed(newName);
-            }
-        }
-        for(Output output : getOutputs().getOutputs()) {
-            final String oldName = output.getFeed();
-            final String newName = feedNameMap.get(oldName);
-            if (!StringUtils.isEmpty(newName)) {
-                output.setFeed(newName);
-            }
-        }
-    }
-
-    /**
-     * Sets unique names for the process.
-     * @return mapping of old name to new name
-     * @param prefix prefix of new name
-     */
-    public Map<? extends String, ? extends String> setUniqueName(String prefix) {
-        final String oldName = getName();
-        final String newName = TestEntityUtil.generateUniqueName(prefix, oldName);
-        setName(newName);
-        final HashMap<String, String> nameMap = new HashMap<>(1);
-        nameMap.put(oldName, newName);
-        return nameMap;
-    }
-
-    /**
-     * Sets optional/compulsory inputs and outputs of the process according to a list of feed
-     * definitions and matching numeric parameters. Optional inputs are set first, followed by
-     * compulsory ones.
-     *
-     * @param newDataSets list of feed definitions
-     * @param numberOfInputs number of desired inputs
-     * @param numberOfOptionalInput how many inputs should be optional
-     * @param numberOfOutputs number of outputs
-     */
-    public void setProcessFeeds(List<String> newDataSets,
-                                int numberOfInputs, int numberOfOptionalInput,
-                                int numberOfOutputs) {
-        int numberOfOptionalSet = 0;
-        boolean isFirst = true;
-
-        Inputs is = new Inputs();
-        for (int i = 0; i < numberOfInputs; i++) {
-            Input in = new Input();
-            in.setEnd("now(0,0)");
-            in.setStart("now(0,-20)");
-            if (numberOfOptionalSet < numberOfOptionalInput) {
-                in.setOptional(true);
-                in.setName("inputData" + i);
-                numberOfOptionalSet++;
-            } else {
-                in.setOptional(false);
-                if (isFirst) {
-                    in.setName("inputData");
-                    isFirst = false;
-                } else {
-                    in.setName("inputData" + i);
-                }
-            }
-            in.setFeed(new FeedMerlin(newDataSets.get(i)).getName());
-            is.getInputs().add(in);
-        }
-
-        setInputs(is);
-        if (numberOfInputs == 0) {
-            setInputs(null);
-        }
-
-        Outputs os = new Outputs();
-        for (int i = 0; i < numberOfOutputs; i++) {
-            Output op = new Output();
-            op.setFeed(new FeedMerlin(newDataSets.get(numberOfInputs - i)).getName());
-            op.setName("outputData");
-            op.setInstance("now(0,0)");
-            os.getOutputs().add(op);
-        }
-        setOutputs(os);
-        setLateProcess(null);
-    }
-
-    /**
-     * Sets process pipelines tag.
-     * @param pipelines set of pipelines to be set to process
-     */
-    public void setPipelineTag(String... pipelines){
-        if (ArrayUtils.isNotEmpty(pipelines)){
-            this.pipelines = StringUtils.join(pipelines, ",");
-        } else {
-            this.pipelines = null;
-        }
-    }
-
-    /**
-     * Set ACL.
-     */
-    public void setACL(String owner, String group, String permission) {
-        ACL acl = new ACL();
-        acl.setOwner(owner);
-        acl.setGroup(group);
-        acl.setPermission(permission);
-        this.setACL(acl);
-    }
-
-    /**
-     * Set SLA.
-     * @param slaStart : start value of SLA
-     * @param slaEnd : end value of SLA
-     */
-
-    public void setSla(Frequency slaStart, Frequency slaEnd) {
-        Sla sla = new Sla();
-        sla.setShouldStartIn(slaStart);
-        sla.setShouldEndIn(slaEnd);
-        this.setSla(sla);
-    }
-
-    /**
-     * Sets new process validity on all the process clusters.
-     *
-     * @param startTime start of process validity
-     * @param endTime   end of process validity
-     */
-    public void setValidity(String startTime, String endTime) {
-
-        for (Cluster cluster : this.getClusters().getClusters()) {
-            cluster.getValidity().setStart(TimeUtil.oozieDateToDate(startTime).toDate());
-            cluster.getValidity().setEnd(TimeUtil.oozieDateToDate(endTime).toDate());
-        }
-    }
-
-    /**
-     * Adds one output into process.
-     */
-    public void addOutputFeed(String outputName, String feedName) {
-        Output out1 = getOutputs().getOutputs().get(0);
-        Output out2 = new Output();
-        out2.setFeed(feedName);
-        out2.setName(outputName);
-        out2.setInstance(out1.getInstance());
-        getOutputs().getOutputs().add(out2);
-    }
-
-    /**
-     * Adds one input into process.
-     */
-    public void addInputFeed(String inputName, String feedName) {
-        Input in1 = getInputs().getInputs().get(0);
-        Input in2 = new Input();
-        in2.setEnd(in1.getEnd());
-        in2.setFeed(feedName);
-        in2.setName(inputName);
-        in2.setPartition(in1.getPartition());
-        in2.setStart(in1.getStart());
-        in2.setOptional(in1.isOptional());
-        getInputs().getInputs().add(in2);
-    }
-
-    public void setInputFeedWithEl(String inputFeedName, String startEl, String endEl) {
-        Inputs inputs = new Inputs();
-        Input input = new Input();
-        input.setFeed(inputFeedName);
-        input.setStart(startEl);
-        input.setEnd(endEl);
-        input.setName("inputData");
-        inputs.getInputs().add(input);
-        this.setInputs(inputs);
-    }
-
-    public void setDatasetInstances(String startInstance, String endInstance) {
-        this.getInputs().getInputs().get(0).setStart(startInstance);
-        this.getInputs().getInputs().get(0).setEnd(endInstance);
-    }
-
-    public void setProcessInputStartEnd(String start, String end) {
-        for (Input input : this.getInputs().getInputs()) {
-            input.setStart(start);
-            input.setEnd(end);
-        }
-    }
-
-    /**
-     * Sets name(s) of the process output(s).
-     *
-     * @param names new names of the outputs
-     */
-    public void setOutputNames(String... names) {
-        Outputs outputs = this.getOutputs();
-        Assert.assertEquals(outputs.getOutputs().size(), names.length,
-            "Number of output names is not equal to number of outputs in process");
-        for (int i = 0; i < names.length; i++) {
-            outputs.getOutputs().get(i).setName(names[i]);
-        }
-        this.setOutputs(outputs);
-    }
-
-    /**
-     * Sets partition for each input, according to number of supplied partitions.
-     *
-     * @param partition partitions to be set
-     */
-    public void setInputPartition(String... partition) {
-        for (int i = 0; i < partition.length; i++) {
-            this.getInputs().getInputs().get(i).setPartition(partition[i]);
-        }
-    }
-
-    /**
-     * Changes names of process inputs.
-     *
-     * @param names desired names of inputs
-     */
-    public void setInputNames(String... names) {
-        for (int i = 0; i < names.length; i++) {
-            this.getInputs().getInputs().get(i).setName(names[i]);
-        }
-    }
-
-    public void setPeriodicity(int frequency, Frequency.TimeUnit periodicity) {
-        Frequency frq = new Frequency(String.valueOf(frequency), periodicity);
-        this.setFrequency(frq);
-    }
-
-    public void setTimeOut(int magnitude, Frequency.TimeUnit unit) {
-        Frequency frq = new Frequency(String.valueOf(magnitude), unit);
-        this.setTimeout(frq);
-    }
-
-    public void setWorkflow(String wfPath, String libPath, EngineType engineType) {
-        Workflow w = this.getWorkflow();
-        if (engineType != null) {
-            w.setEngine(engineType);
-        }
-        if (libPath != null) {
-            w.setLib(libPath);
-        }
-        w.setPath(wfPath);
-        this.setWorkflow(w);
-    }
-
-    public String getFirstInputName() {
-        return getInputs().getInputs().get(0).getName();
-    }
-
-    @Override
-    public EntityType getEntityType() {
-        return EntityType.PROCESS;
-    }
-
-    public void assertGeneralProperties(ProcessMerlin newProcess){
-        SoftAssert softAssert = new SoftAssert();
-        // Assert all the General Properties
-        softAssert.assertEquals(newProcess.getName(), getName(),
-            "Process Name is different");
-        softAssert.assertEquals(newProcess.getTags(), getTags(),
-            "Process Tags Value is different");
-        softAssert.assertEquals(newProcess.getWorkflow().getName(), getWorkflow().getName(),
-            "Process Workflow Name is different");
-        if (getWorkflow().getEngine() == EngineType.OOZIE || getWorkflow().getEngine() == null) {
-            softAssert.assertTrue(newProcess.getWorkflow().getEngine() == EngineType.OOZIE
-                || newProcess.getWorkflow().getEngine() == null, "Process Workflow Engine is different");
-        } else {
-            softAssert.assertEquals(newProcess.getWorkflow().getEngine().toString(),
-                getWorkflow().getEngine().toString(),
-                "Process Workflow Engine is different");
-        }
-        softAssert.assertEquals(newProcess.getWorkflow().getPath(), getWorkflow().getPath(),
-            "Process Workflow Path is different");
-        softAssert.assertEquals(newProcess.getACL().getOwner(), getACL().getOwner(),
-            "Process ACL Owner is different");
-        softAssert.assertEquals(newProcess.getACL().getGroup(), getACL().getGroup(),
-            "Process ACL Group is different");
-        softAssert.assertEquals(newProcess.getACL().getPermission(), getACL().getPermission(),
-            "Process ACL Permission is different");
-        softAssert.assertAll();
-    }
-
-    public void assertPropertiesInfo(ProcessMerlin newProcess) {
-        SoftAssert softAssert = new SoftAssert();
-        // Assert all the Properties Info
-        softAssert.assertEquals(newProcess.getTimezone().getID(), getTimezone().getID(),
-            "Process TimeZone is different");
-        softAssert.assertEquals(newProcess.getFrequency().getFrequency(), getFrequency().getFrequency(),
-            "Process Frequency is different");
-        softAssert.assertEquals(newProcess.getFrequency().getTimeUnit().toString(),
-            getFrequency().getTimeUnit().toString(),
-            "Process Frequency Unit is different");
-        softAssert.assertEquals(newProcess.getParallel(), getParallel(),
-            "Process Parallel is different");
-        softAssert.assertEquals(newProcess.getOrder(), getOrder(),
-            "Process Order is different");
-        softAssert.assertEquals(newProcess.getRetry().getPolicy().value(),
-            getRetry().getPolicy().value(),
-            "Process Retry Policy is different");
-        softAssert.assertEquals(newProcess.getRetry().getAttempts(),
-            getRetry().getAttempts(),
-            "Process Retry Attempts is different");
-        softAssert.assertEquals(newProcess.getRetry().getDelay().getFrequency(),
-            getRetry().getDelay().getFrequency(),
-            "Process Delay Frequency is different");
-        softAssert.assertEquals(newProcess.getRetry().getDelay().getTimeUnit().name(),
-            getRetry().getDelay().getTimeUnit().name(),
-            "Process Delay Unit is different");
-        softAssert.assertAll();
-    }
-
-    /**
-     * Asserts equality of process inputs.
-     */
-    public void assertInputValues(ProcessMerlin newProcess) {
-        Assert.assertEquals(newProcess.getInputs().getInputs().size(), getInputs().getInputs().size(),
-            "Processes have different number of inputs.");
-        SoftAssert softAssert = new SoftAssert();
-        // Assert all the Input values
-        for (int i = 0; i < newProcess.getInputs().getInputs().size(); i++) {
-            softAssert.assertEquals(newProcess.getInputs().getInputs().get(i).getName(),
-                getInputs().getInputs().get(i).getName(),
-                "Process Input Name is different");
-            softAssert.assertEquals(newProcess.getInputs().getInputs().get(i).getFeed(),
-                getInputs().getInputs().get(i).getFeed(),
-                "Process Input Feed is different");
-            softAssert.assertEquals(newProcess.getInputs().getInputs().get(i).getStart(),
-                getInputs().getInputs().get(i).getStart(),
-                "Process Input Start is different");
-            softAssert.assertEquals(newProcess.getInputs().getInputs().get(i).getEnd(),
-                getInputs().getInputs().get(i).getEnd(),
-                "Process Input End is different");
-            softAssert.assertEquals(newProcess.getInputs().getInputs().get(i).isOptional(),
-                getInputs().getInputs().get(i).isOptional(),
-                "Process Input optional param is different");
-        }
-        softAssert.assertAll();
-    }
-
-    /**
-     * Asserts equality of process outputs.
-     */
-    public void assertOutputValues(ProcessMerlin newProcess) {
-        SoftAssert softAssert = new SoftAssert();
-        // Assert all the Output values
-        softAssert.assertEquals(newProcess.getOutputs().getOutputs().get(0).getName(),
-            getOutputs().getOutputs().get(0).getName(),
-            "Process Output Name is different");
-        softAssert.assertEquals(newProcess.getOutputs().getOutputs().get(0).getFeed(),
-            getOutputs().getOutputs().get(0).getFeed(),
-            "Process Output Feed is different");
-        softAssert.assertEquals(newProcess.getOutputs().getOutputs().get(0).getInstance(),
-            getOutputs().getOutputs().get(0).getInstance(),
-            "Process Output Instance is different");
-        softAssert.assertAll();
-    }
-
-    /**
-     * Asserts equality of two processes.
-     */
-    public void assertEquals(ProcessMerlin process) {
-        LOGGER.info(String.format("Comparing General Properties: source: %n%s%n and process: %n%n%s",
-            Util.prettyPrintXml(toString()), Util.prettyPrintXml(process.toString())));
-        assertGeneralProperties(process);
-        assertInputValues(process);
-        assertOutputValues(process);
-        assertPropertiesInfo(process);
-        assertClustersEqual(getClusters().getClusters(), process.getClusters().getClusters());
-    }
-
-    /**
-     * Creates an empty process definition.
-     */
-    public static ProcessMerlin getEmptyProcess(ProcessMerlin process) {
-        ProcessMerlin draft = new ProcessMerlin(process.toString());
-        draft.setName("");
-        draft.setTags("");
-        draft.setACL(null);
-        draft.getInputs().getInputs().clear();
-        draft.getOutputs().getOutputs().clear();
-        draft.setRetry(null);
-        draft.clearProcessCluster();
-        draft.clearProperties();
-        draft.setFrequency(null);
-        draft.setOrder(null);
-        draft.setTimezone(null);
-        draft.setParallel(0);
-        Workflow workflow = new Workflow();
-        workflow.setName(null);
-        workflow.setPath(null);
-        workflow.setVersion(null);
-        workflow.setEngine(null);
-        draft.setWorkflow(null, null, null);
-        return draft;
-    }
-
-    /**
-     * Replaces old input by new input.
-     */
-    public void resetInputFeed(String inputName, String feedName) {
-        Input in1 = getInputs().getInputs().get(0);
-        getInputs().getInputs().clear();
-        Input in2 = new Input();
-        in2.setEnd(in1.getEnd());
-        in2.setFeed(feedName);
-        in2.setName(inputName);
-        in2.setPartition(in1.getPartition());
-        in2.setStart(in1.getStart());
-        in2.setOptional(in1.isOptional());
-        getInputs().getInputs().add(in2);
-    }
-
-    /**
-     * Replaces old output by new output.
-     */
-    public void resetOutputFeed(String outputName, String feedName) {
-        Output out1 = getOutputs().getOutputs().get(0);
-        getOutputs().getOutputs().clear();
-        Output out2 = new Output();
-        out2.setFeed(feedName);
-        out2.setName(outputName);
-        out2.setInstance(out1.getInstance());
-        getOutputs().getOutputs().add(out2);
-    }
-
-    /**
-     * Adds array of feeds as input.
-     */
-    public void addInputFeeds(String[] ipFeed) {
-        for (String feed : ipFeed) {
-            addInputFeed(feed, feed);
-        }
-    }
-
-    /**
-     * Adds array of feeds as output.
-     */
-    public void addOutputFeeds(String[] opFeed) {
-        for (String feed : opFeed) {
-            addOutputFeed(feed, feed);
-        }
-    }
-
-}
-
-


[22/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/FalconDocumentation.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/FalconDocumentation.twiki b/docs/src/site/twiki/FalconDocumentation.twiki
deleted file mode 100644
index 122435a..0000000
--- a/docs/src/site/twiki/FalconDocumentation.twiki
+++ /dev/null
@@ -1,777 +0,0 @@
----++ Contents
-   * <a href="#Architecture">Architecture</a>
-   * <a href="#Control_flow">Control flow</a>
-   * <a href="#Modes_Of_Deployment">Modes Of Deployment</a>
-   * <a href="#Entity_Management_actions">Entity Management actions</a>
-   * <a href="#Instance_Management_actions">Instance Management actions</a>
-   * <a href="#Retention">Retention</a>
-   * <a href="#Replication">Replication</a>
-   * <a href="#Cross_entity_validations">Cross entity validations</a>
-   * <a href="#Updating_process_and_feed_definition">Updating process and feed definition</a>
-   * <a href="#Handling_late_input_data">Handling late input data</a>
-   * <a href="#Idempotency">Idempotency</a>
-   * <a href="#Falcon_EL_Expressions">Falcon EL Expressions</a>
-   * <a href="#Lineage">Lineage</a>
-   * <a href="#Security">Security</a>
-   * <a href="#Recipes">Recipes</a>
-   * <a href="#Monitoring">Monitoring</a>
-   * <a href="#Email_Notification">Email Notification</a>
-   * <a href="#Backwards_Compatibility">Backwards Compatibility Instructions</a>
-   * <a href="#Proxyuser_support">Proxyuser support</a>
-   * <a href="#ImportExport">Data Import and Export</a>
-
----++ Architecture
-
----+++ Introduction
-Falcon is a feed and process management platform over Hadoop. Falcon essentially transforms the user's feed
-and process configurations into repeated actions through a standard workflow engine. Falcon by itself
-doesn't do any heavy lifting. All the functions and workflow state management requirements are delegated
-to the workflow scheduler. The only thing that Falcon maintains is the dependencies and relationships between
-these entities. This is adequate to provide an integrated and seamless experience to developers using
-the Falcon platform.
-
----+++ Falcon Architecture - Overview
-<img src="Architecture.png" height="400" width="600" />
-
----+++ Scheduler
-Falcon uses Oozie as its default scheduler. However, the system is open to integration with
-other schedulers. Much of the data processing in Hadoop requires scheduling based on both data availability
-and time. Oozie currently supports these capabilities off the shelf, hence the choice.
-
-While the use of Oozie works reasonably well, there are scenarios where Oozie scheduling is proving to be a limiting factor. In its current form, Falcon relies on Oozie for both scheduling and for workflow execution, due to which the scheduling is limited to time based/cron based scheduling with additional gating conditions on data availability. Also, this imposes restrictions on datasets being periodic/cyclic in nature. In order to offer better scheduling capabilities, Falcon comes with its own native scheduler. Refer to [[FalconNativeScheduler][Falcon Native Scheduler]] for details.
-
----+++ Control flow
-Though the actual responsibility for the workflow lies with the scheduler (Oozie), Falcon remains in the
-execution path by subscribing to messages that each of the workflows may generate. When Falcon generates a
-workflow in Oozie, it does so after instrumenting the workflow with additional steps, which include messaging
-via JMS. Falcon itself subscribes to these control messages and can perform actions such as retries,
-handling late input arrival, etc.
-
-
----++++ Feed Schedule flow
-<img src="FeedSchedule.png" height="400" width="600" />
-
----++++ Process Schedule flow
-<img src="ProcessSchedule.png" height="400" width="600" />
-
-
-
----++ Modes Of Deployment
-A Falcon setup has two basic components: Falcon Prism and Falcon Server.
-As the name suggests, Falcon Prism splits the requests it receives across the Falcon Servers. More details below:
-
----+++ Stand Alone Mode
-Standalone mode is useful when the Hadoop jobs and relevant data processing involve only one Hadoop cluster.
-In this mode there is a single Falcon server that contacts Oozie to schedule jobs on Hadoop.
-All the process/feed requests (submit, schedule, suspend, kill, etc.) are sent to this server.
-To run Falcon in this mode, use a Falcon build created with the standalone option.
-
----+++ Distributed Mode
-Distributed mode is for multiple (colo) instances of Hadoop clusters, with multiple workflow schedulers to handle them.
-In this mode Falcon has two components: Prism and Server(s).
-Both Prism and the servers have their own setup (runtime and startup properties) and their own config locations.
-In this mode Prism acts as the contact point for the Falcon servers.
-While all commands are available through Prism, only read and instance APIs are available through the Server.
-Below are the requests that can be sent to each of these:
-
- Prism: submit, schedule, submitAndSchedule, suspend, resume, kill, instance management
- Server: schedule, suspend, resume, instance management
-
-As observed above, submit and kill are kept exclusively as Prism operations to keep all the config stores in sync and to support idempotency.
-A request may also be sent through Prism but directed to a specific server using the "-colo" option from the CLI, or by appending the same parameter to the web request when using the API.
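-
-For illustration only (the entity name and colo name are hypothetical; refer to the Falcon CLI documentation for the exact syntax), a colo-directed request could look like:
-<verbatim>
-falcon entity -type process -name sampleProcess -schedule -colo colo1
-</verbatim>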
-
-When a cluster is submitted it is, by default, sent to all the servers configured in Prism.
-When a feed is submitted/scheduled, the request is only sent to the servers specified in the feed/process definitions. Servers are referenced in the feed/process via CLUSTER tags in the XML definition.
-
-Communication between prism and falcon server (for submit/update entity function) is secured over https:// using a client-certificate based auth. Prism server needs to present a valid client certificate for the falcon server to accept the action.
-
-Startup property file in both falcon & prism server need to be configured with the following configuration if TLS is enabled.
-* keystore.file
-* keystore.password
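-
-For illustration only (the path and value are made up), the corresponding startup property entries could look like:
-<verbatim>
-keystore.file=/etc/falcon/conf/prism.keystore
-keystore.password=falcon-keystore-password
-</verbatim>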
-
----++++ Prism Setup
-<img src="PrismSetup.png" height="400" width="600" />
- 
----+++ Configuration Store
-The configuration store is a file system based store that the Falcon system maintains, where the entity definitions
-are stored. The file system used for the configuration store can be either a local file system or HDFS.
-It is recommended that the store be maintained outside of the system where Falcon is deployed. This is needed
-for handling issues relating to disk failures or other permanent failures of the system where Falcon is deployed.
-The configuration store also maintains an archive location where prior versions of the configuration or deleted
-configurations are kept. They are never accessed by the Falcon system and merely serve to track
-historical changes to the entity definitions.
-
----+++ Atomic Actions
-Oftentimes, when Falcon performs entity management actions, it may need to do several individual actions.
-If one of the actions were to fail, the system could be left in an inconsistent state. To avoid this, all
-individual operations performed are recorded in a transaction journal. This journal is then used to undo
-the overall user action. In some cases it is not possible to undo the action; in such cases, Falcon attempts
-to keep the system in a consistent state.
-
----+++ Storage
-Falcon introduces a new abstraction to encapsulate the storage for a given feed, which can be expressed either
-as a path on the file system (File System Storage) or as a table in a catalog such as Hive (Catalog Storage).
-
-<verbatim>
-    <xs:choice minOccurs="1" maxOccurs="1">
-        <xs:element type="locations" name="locations"/>
-        <xs:element type="catalog-table" name="table"/>
-    </xs:choice>
-</verbatim>
-
-A feed should contain one of the two storage options: locations on the file system, or a table in a catalog.
-
----++++ File System Storage
-
-This is expressed as a location on the file system. A location specifies where the feed is available on this cluster.
-A location tag specifies the type of location (data, meta or stats) and the corresponding path for it.
-A feed should at least define the location for type data, which specifies the HDFS path pattern where the feed is
-generated periodically. e.g. type="data" path="/projects/TrafficHourly/${YEAR}-${MONTH}-${DAY}/traffic"
-The granularity of the date pattern in the path should be at least that of the frequency of the feed.
-
-<verbatim>
- <location type="data" path="/projects/falcon/clicks" />
- <location type="stats" path="/projects/falcon/clicksStats" />
- <location type="meta" path="/projects/falcon/clicksMetaData" />
-</verbatim>
-
----++++ Catalog Storage (Table)
-
-A table tag specifies the table URI in the catalog registry as:
-<verbatim>
-catalog:$database-name:$table-name#partition-key=partition-value[;partition-key=partition-value]*
-</verbatim>
-
-This is modeled as a URI (similar to an ISBN URI). It does not have any reference to Hive or HCatalog. It is quite
-generic, so it can be tied to other implementations of a catalog registry. The catalog implementation specified
-in the startup config provides the implementation for the catalog URI.
-
-The top-level partition has to be a dated pattern, and the granularity of the date pattern should be at least that
-of the frequency of the feed.
-
-Examples:
-<verbatim>
-<table uri="catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR};region=${region}" />
-<table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-<table uri="catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-</verbatim>
-
-
----++ Entity Management actions
-All the following operation can also be done using [[restapi/ResourceList][Falcon's RESTful API]].
-
----+++ Submit
-Entity submit action allows a new cluster/feed/process to be setup within Falcon. Submitted entity is not
-scheduled, meaning it would simply be in the configuration store within Falcon. Besides validating against
-the schema for the corresponding entity being added, the Falcon system would also perform inter-field
-validations within the configuration file and validations across dependent entities.
-
----+++ List
-List all the entities within the falcon config store for the entity type being requested. This will include
-both scheduled and submitted entity configurations.
-
----+++ Dependency
-Returns the dependencies of the requested entity. The dependency list includes both forward and backward
-dependencies (depends on & is dependent on). For example, a feed would show the processes that are dependent on the
-feed and the clusters that it depends on.
-
----+++ Schedule
-Feeds or Processes that are already submitted and present in the config store can be scheduled. Upon schedule,
-Falcon system wraps the required repeatable action as a bundle of oozie coordinators and executes them on the
-Oozie scheduler. (It is possible to extend Falcon to use an alternate workflow engine other than Oozie).
-Falcon overrides the workflow instance's external id in Oozie to reflect the process/feed and the nominal
-time. This external Id can then be used for instance management functions.
-
-The schedule copies the user specified workflow and library to a staging path, and the scheduler references the workflow
-and lib from the staging path.
-
----+++ Suspend
-This action is applicable only to a scheduled entity. It triggers a suspend on the Oozie bundle that was
-scheduled earlier through the schedule function. No further instances are executed on a suspended process/feed.
-
----+++ Resume
-Puts a suspended process/feed back to active, which in turn resumes the applicable Oozie bundle.
-
----+++ Status
-Gets the current status of the entity.
-
----+++ Definition
-Gets the current entity definition as stored in the configuration store. Please note that user documentations
-in the entity will not be retained.
-
----+++ Delete
-Delete operation on the entity removes any scheduled activity on the workflow engine, besides removing the
-entity from the falcon configuration store. Delete operation on an entity would only succeed if there are
-no dependent entities on the deleted entity.
-
----+++ Update
-The update operation allows an already submitted/scheduled entity to be updated. Cluster update is currently
-not allowed. A feed update can cause a cascading update to all the processes already scheduled on it. A process
-update triggers an update of its scheduled workflows. The following set of actions is performed in the scheduler to realize an update:
-   * Update the old scheduled entity to set the end time to "now"
-   * Schedule as per the new process/feed definition with the start time as "now"
-
----++ Instance Management actions
-
-The Instance Manager gives the user the option to control individual instances of a process based on their instance start time. The start time needs to be given in the standard TZ format. Example: 01 Jan 2012 01:00 => 2012-01-01T01:00Z
-
-All the instance management operations (except running) allow a single instance, or a list of instances within a date range, to be acted on. Make sure the dates are valid, i.e. within the start and end time of the process itself.
-
-For every query in instance management the process name is a compulsory parameter. 
-
-Parameters -start and -end are used to mention the date range within which you want the instance to be operated upon. 
-
--start: using only "-start" without "-end" will conduct the desired operation only on the single instance given by the date passed with -start.
-
--end: "-end" can only be used along with "-start". It corresponds to the end date up to which instances need to be operated upon.
-
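-As an illustration only (the process name and dates are hypothetical, and the standard falcon CLI entry point is assumed; see the Falcon CLI documentation for the exact syntax), a status query over a date range could look like:
-<verbatim>
-falcon instance -type process -name sampleProcess -status -start 2012-01-01T01:00Z -end 2012-01-01T05:00Z
-</verbatim>
-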
-   * 1. *status*: the -status option via the CLI can be used to get the status of a single instance or multiple instances. If the instance is not yet materialized but is within the process validity range, WAITING is returned as the state. Along with the status of the instance, the log location is also returned.
-
-
-   * 2.	*running*: -running returns all the running instances of the process. It does not take any start or end dates but simply returns all the instances in state RUNNING at that given time.
-
-   * 3.	*rerun*: -rerun is the option that you will use most often from instance management. As the name suggests, this option is used to rerun a particular instance or instances of the process. The rerun option reruns the parent workflow for the instance, which in turn reruns all the sub-workflows for it. This option is valid for any instance in a terminal state, i.e. KILLED, SUCCEEDED or FAILED. The user can also set properties in the request to control which types of actions should be rerun, e.g. only failed actions, all actions, etc. These properties depend on the workflow engine being used along with Falcon.
-   
-   * 4. *suspend*: -suspend is used to suspend an instance or instances of the given process. This option pauses the parent workflow at the state it was in at the time of execution of this command. This command is similar in functionality to the SUSPEND process command, the only difference being that SUSPEND process suspends all the instances, whereas suspend instance suspends only that instance or the instances in the range.
-
-   * 5.	*resume*: the -resume option is used to resume any instance that is in a suspended state. (Note: due to a bug in Oozie, the -resume option in some cases may not actually resume the suspended instance/instances.)
-   * 6. *kill*: -kill option can be used to kill an instance or multiple instances
-
-   * 7. *summary*: -summary option via CLI can be used to get the consolidated status of the instances between the specified time period. Each status along with the corresponding instance count are listed for each of the applicable colos.
-
-
-In all the cases where your request is syntactically correct but logically not, the instance/instances are returned with the same status as earlier. Example: trying to resume a KILLED/SUCCEEDED instance will return the instance with status KILLED/SUCCEEDED, without actually performing any operation. This is because only an instance in the SUSPENDED state can be resumed. The same applies to rerunning a SUSPENDED or RUNNING instance, and so on.
-
----++ Retention
-In coherence with its feed lifecycle management philosophy, Falcon allows the user to retain data in the system
-for a specific period of time for a scheduled feed. The user can specify the retention period in the respective
-feed/data xml in the following manner for each cluster the feed can belong to:
-<verbatim>
-<clusters>
-        <cluster name="corp" type="source">
-            <validity start="2012-01-30T00:00Z" end="2013-03-31T23:59Z"
-                      timezone="UTC" />
-            <retention limit="hours(10)" action="delete" /> 
-        </cluster>
- </clusters> 
-</verbatim>
-
-The 'limit' attribute can be specified in units of minutes/hours/days/months, and a corresponding numeric value can
-be attached to it. It essentially instructs the system to retain data up to the time specified
-in the attribute, spanning backwards in time from now. Any data older than that is erased from the system. By default,
-Falcon runs retention jobs up to the cluster validity end time. This causes the instances created between the endTime
-and "endTime - retentionLimit" to be retained forever. If users do not want to retain any instances of the
-feed past the cluster validity end time, they should set the property "falcon.retention.keep.instances.beyond.validity"
-to false in runtime.properties.
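-
-For illustration (assuming the *. prefix used in the runtime.properties examples elsewhere in this document), the entry described above would look like:
-<verbatim>
-*.falcon.retention.keep.instances.beyond.validity=false
-</verbatim>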
-
-With the integration of Hive, Falcon also provides retention for tables in Hive catalog.
-
----+++ Example:
-If the retention period is 10 hours, and the policy kicks in at time 't', the data retained by the system is essentially
-that at or after t-10h. Any data before t-10h is removed from the system.
-
-The 'action' attribute can attain values of DELETE/ARCHIVE. Based upon the tag value, the data eligible for removal is
-either deleted/archived.
-
----+++ NOTE: Falcon 0.1/0.2 releases support Delete operation only
-
----+++ When does retention policy come into play, aka when is retention really performed?
-
-Retention policy in Falcon kicks off on the basis of the time value specified by the user. Here are the basic rules:
-
-   * If the retention policy specified is less than 24 hours: In this event, the retention policy automatically kicks off every 6 hours.
-   * If the retention policy specified is more than 24 hours: In this event, the retention policy automatically kicks off every 24 hours.
-   * As soon as a feed is successfully scheduled: the retention policy is triggered immediately regardless of the current timestamp/state of the system.
-
-Relation between feed path and retention policy: Retention policy for a particular scheduled feed applies only to the eligible feed path
-specified in the feed xml. Any other paths that do not conform to the specified feed path are left unaffected by the retention policy.
-
----++ Replication
-Falcon's feed lifecycle management also supports Feed replication across different clusters out-of-the-box.
-Multiple source clusters and target clusters can be defined in feed definition. Falcon replicates the data using
-hadoop's distcp version 2 across different clusters whenever a feed is scheduled.
-
-The frequency at which the data is replicated is governed by the frequency specified in the feed definition.
-Ideally, the feed's data path should have the same granularity as the frequency of the feed, i.e. if the frequency of the feed is hours(3), then the data path should be to the level /${YEAR}/${MONTH}/${DAY}/${HOUR}.
-<verbatim>
-    <clusters>
-        <cluster name="sourceCluster1" type="source" partition="${cluster.name}" delay="minutes(40)">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-        </cluster>
-        <cluster name="sourceCluster2" type="source" partition="COUNTRY/${cluster.name}">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-        </cluster>
-    </clusters>
-</verbatim>
-
-If more than one source cluster is defined, then a partition expression is compulsory; a partition can also contain a constant.
-The expression is required to avoid copying data from different source locations to the same target location.
-Also, only the data in the partition is considered for replication, if it is present. The number of partitions defined in the
-cluster should be less than or equal to the number of partitions declared in the feed definition.
-
-Falcon uses a pull-based replication mechanism, meaning that in every target cluster, for a given source cluster,
-a coordinator is scheduled which pulls the data from the source cluster using distcp. So in the above example,
-2 coordinators are scheduled in backupCluster, one which pulls the data from sourceCluster1 and another
-from sourceCluster2. Also, for every feed instance which is replicated, Falcon sends a JMS message on success or
-failure of the replication instance.
-
-Replication can be scheduled with a past date; the time frame considered for replication is the minimum
-overlapping window of the start and end times of the source and target clusters. For example, if s1 and e1 are the start and end
-times of the source cluster respectively, and s2 and e2 those of the target cluster, then the coordinator is scheduled in the
-target cluster with start time max(s1,s2) and end time min(e1,e2).
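-
-*Example (illustrative dates):* if the source cluster validity is 2021-11-01T00:00Z to 2021-12-31T00:00Z and the target cluster validity is 2021-11-15T00:00Z to 2022-01-15T00:00Z, the replication coordinator on the target cluster runs with start time max(2021-11-01T00:00Z, 2021-11-15T00:00Z) = 2021-11-15T00:00Z and end time min(2021-12-31T00:00Z, 2022-01-15T00:00Z) = 2021-12-31T00:00Z.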
-
-A feed can also optionally specify the delay for replication instances in the cluster tag; the delay governs when each
-replication instance runs. If the frequency of the feed is hours(2) and the delay is hours(1), then the replication
-instance will run every 2 hours and replicate data with an offset of 1 hour: at 09:00 UTC the feed instance eligible
-for replication is the 08:00 one, at 11:00 UTC the 10:00 UTC feed instance is eligible, and so on.
-
-If it is required to capture feed replication metrics like TIMETAKEN, COPY and BYTESCOPIED, set the parameter "job.counter" to "true"
-in the feed entity's properties section. Captured metrics from the instance will be populated to the GraphDB for display on the UI.
-
-*Example:*
-<verbatim>
-<properties>
-        <property name="job.counter" value="true" />
-</properties>
-</verbatim>
-
----+++ Where is the feed path defined for File System Storage?
-
-It's defined in the feed xml within the location tag.
-
-*Example:*
-<verbatim>
-<locations>
-        <location type="data" path="/retention/testFolders/${YEAR}-${MONTH}-${DAY}" />
-</locations>
-</verbatim>
-
-Now, if the above path contains folders in the following fashion:
-
-/retention/testFolders/${YEAR}-${MONTH}-${DAY}
-/retention/testFolders/${YEAR}-${MONTH}/someFolder
-
-The feed retention policy would only act on the former and not the latter.
-
-Users may choose to override the feed path specific to a cluster, so every cluster
-may have a different feed path.
-*Example:*
-<verbatim>
-<clusters>
-        <cluster name="testCluster" type="source">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-       		<locations>
-        		<location type="data" path="/projects/falcon/clicks/${YEAR}-${MONTH}-${DAY}" />
-        		<location type="stats" path="/projects/falcon/clicksStats/${YEAR}-${MONTH}-${DAY}" />
-        		<location type="meta" path="/projects/falcon/clicksMetaData/${YEAR}-${MONTH}-${DAY}" />
-    		</locations>
-        </cluster>
-    </clusters>
-</verbatim>
-
----+++ Hive Table Replication
-
-With the integration of Hive, Falcon adds table replication of Hive catalog tables. Replication will be triggered
-for a partition when the partition is complete at the source.
-
-   * Falcon will use HCatalog (Hive) API to export the data for a given table and the partition,
-which will result in a data collection that includes metadata on the data's storage format, the schema,
-how the data is sorted, what table the data came from, and values of any partition keys from that table.
-   * Falcon will use the distcp tool to copy the exported data collection into a staging
-directory on the secondary cluster used by Falcon.
-   * Falcon will then import the data into HCatalog (Hive) using the HCatalog (Hive) API. If the specified table does
-not yet exist, Falcon will create it, using the information in the imported metadata to set defaults for the table
-such as schema, storage format, etc.
-   * The partition is not complete, and hence not visible to users, until all the data is committed on the secondary
-cluster (no dirty reads).
-
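-As an illustrative fragment only (cluster names and the table URI are hypothetical, and other required feed elements are omitted), a feed replicating a catalog table from a source to a target cluster would combine a clusters block with a table storage definition:
-<verbatim>
-    <clusters>
-        <cluster name="primaryCluster" type="source">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-            <retention limit="months(6)" action="delete"/>
-        </cluster>
-        <cluster name="backupCluster" type="target">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-            <retention limit="months(6)" action="delete"/>
-        </cluster>
-    </clusters>
-    <table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-</verbatim>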
-
----+++ Archival as Replication
-
-Falcon allows users to archive data from on-premises clusters to the cloud, either Azure WASB or S3.
-It uses the underlying replication for archiving data from source to target. The archival URI is
-specified as the overridden location for the target cluster.
-
-*Example:*
-<verbatim>
-    <clusters>
-        <cluster name="on-premise-cluster" type="source">
-            <validity start="2021-11-01T00:00Z" end="2021-12-31T00:00Z"/>
-        </cluster>
-        <cluster name="cloud-cluster" type="target">
-            <validity start="2011-11-01T00:00Z" end="2011-12-31T00:00Z"/>
-            <locations>
-                <location type="data"
-                          path="wasb://test@blah.blob.core.windows.net/data/${YEAR}-${MONTH}-${DAY}-${HOUR}"/>
-            </locations>
-        </cluster>
-    </clusters>
-</verbatim>
-
----+++ Relation between feed's retention limit and feed's late arrival cut off period:
-
-For reasons that are obvious, Falcon has an external validation that ensures that the user
-always specifies the feed retention limit to be more than the feed's allowed late arrival period.
-If this rule is violated, the feed submission call itself throws an error.
-
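-As an illustrative fragment (values are hypothetical), a feed with a late-arrival cut-off of hours(6) must declare a retention limit larger than that, for example:
-<verbatim>
-<late-arrival cut-off="hours(6)"/>
-...
-<retention limit="hours(10)" action="delete"/>
-</verbatim>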
-
----++ Cross entity validations
-
-
----+++ Entity Dependencies in a nutshell
-<img src="EntityDependency.png" height="50" width="300" />
-
-
-The above schematic shows the dependencies between entities in Falcon. The arrow in the above diagram
-points from a dependency to the dependent.
-
-
-Let's just get one simple rule stated here, which we will keep referring to time and again while
-talking about entities: A dependency in the system cannot be removed unless all its dependents are
-removed first. This holds true for all transitive dependencies as well.
-
-Now, let's follow it up with a simple illustration of a Falcon job:
-
-Let's consider a process P that refers to feed F1 as an input feed, and generates feed F2 as an
-output feed. These feeds/processes are supposed to be associated with a cluster C1.
-
-The order of submission of this job would be in the following order:
-
-C1->F1/F2(in any order)->P
-
-The order of removal of this job from the system is in the exact opposite order, i.e.:
-
-P->F1/F2(in any order)->C1
-
-Please note that there might be multiple processes referring to a particular feed, or a single feed belonging
-to multiple clusters. In that event, none of the dependencies can be removed unless ALL of their dependents
-are removed first. Attempting to do so will result in an error message and a 400 Bad Request response.
-
-
----+++ Other cross validations between entities in Falcon system
-
-*Cluster-Feed Cross validations:*
-
-   * The cluster(s) referenced by the feed (inside the <clusters> tag) should be present in the system at the time
-of submission. Any exception to this results in a feed submission failure. Note that a feed might be referring
-to more than a single cluster. The identifier for the same is the 'name' attribute for the individual cluster.
-
-*Example:*
-
-*Feed XML:*
-   
-<verbatim>
-   <clusters>
-        <cluster name="corp" type="source">
-            <validity start="2009-01-01T00:00Z" end="2012-12-31T23:59Z"
-                      timezone="UTC" />
-            <retention limit="months(6)" action="delete" />
-        </cluster>
-    </clusters>
-</verbatim>
-
-*Cluster corp's XML:*
-
-<verbatim>
-<cluster colo="gs" description="" name="corp" xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-</verbatim>
-
-*Cluster-Process Cross validations:*
-
-
-   * In a similar relationship to that of feed and a cluster, a process also refers to the relevant cluster by the
-'name' attribute. Any exception results in a process submission failure.
-
-
----+++ Example:
----+++ Process XML:
-<verbatim>
-<process name="agregator-coord16">
-    <cluster name="corp"/>....
-</verbatim>
----+++ Cluster corp's XML:
-<verbatim>
-<cluster colo="gs" description="" name="corp" xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-</verbatim>
-
-*Feed-Process Cross Validations:*
-
-
-1. The process <input> and feeds designated as input feeds for the job:
-
- For every feed referenced in the <input> tag in a process definition, following rules are applied
-when the process is due for submission:
-
-   * The feed having a value associated with the 'feed' attribute in input tag should be present in
-the system. The corresponding attribute in the feed definition is the 'name' attribute in the <feed> tag.
-
-*Example:*
-
-*Process xml:*
-
-<verbatim>
-<input end-instance="now(0,20)" start-instance="now(0,-60)"
-feed="raw-logs16" name="inputData"/>
-</verbatim>
-
-*Feed xml:*
-<verbatim>
-<feed description="clicks log" name="raw-logs16"....
-</verbatim>
-
-   
-   * The time interpretation of the corresponding tags indicating the start and end instances for a
-particular input feed in the process xml should lie well within the time span of the period specified in
-the <validity> tag of the particular feed.
-
-*Example:*
-
-1. In the following scenario, process submission will result in an error:
-
-*Process XML:*
-<verbatim>
-<input end-instance="now(0,20)" start-instance="now(0,-60)"
-   feed="raw-logs16" name="inputData"/>
-</verbatim>
-*Feed XML:*
-<verbatim>
-<validity start="2009-01-01T00:00Z" end="2009-12-31T23:59Z".....
-</verbatim>
-Explanation: The process timelines for the feed span the interval [-60m, +20m] relative to
-the current timestamp (which, let's assume, is 'today' as per the 'now' directive). However, the feed validity
-is a one-year period in 2009, which makes it anachronistic.
-
-2. The following example would work just fine:
-
-*Process XML:*
-<verbatim>
-<input end-instance="now(0,20)" start-instance="now(0,-60)"
-   feed="raw-logs16" name="inputData"/>
-</verbatim>
-*Feed XML:*
-<verbatim>
-validity start="2009-01-01T00:00Z" end="2012-12-31T23:59Z" .......
-</verbatim>
-since at the time of charting this document (03/03/2012), the feed validity is able to encapsulate the process
-input's start and end instances.
-
-
-Failure to follow any of the above rules would result in a process submission failure.
-
-*NOTE:* Even though the above check ensures that the timelines are not anachronistic, if the input data is not
-present in the system for the specified time period, the process can be submitted and scheduled, but all instances
-created would result in a WAITING state unless data is actually provided in the cluster.
-
-
-
----++ Updating process and feed definition
-Any changes in a feed/process can be made by updating its definition. After the update, any new workflows which are to be scheduled after the update call will pick up the new changes. Feed/process name and start time can't be updated. Updating a process triggers an update to the corresponding workflow in the workflow engine. Updating a feed updates the feed workflows, like retention and replication, and also updates the processes that reference the feed.
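-
-For illustration only (the entity name and file path are hypothetical; see the Falcon CLI documentation for the exact syntax), an update could be issued as:
-<verbatim>
-falcon entity -type process -name sampleProcess -update -file /path/to/updated-process.xml
-</verbatim>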
-
-
----++ Handling late input data
-The Falcon system can handle late arrival of input data and appropriately re-trigger processing for the affected
-instance. From the perspective of late handling, two main configuration parameters are central: the late-arrival cut-off
-and the late-inputs section in the feed and process entity definitions. These configurations govern
-how and when the late processing happens. In the current (Oozie based) implementation the late handling is very
-simple and basic. The Falcon system looks at all dependent input feeds for a process and computes the maximum late
-cut-off period. It then uses a scheduled messaging framework, like the one available in Apache ActiveMQ or Java's !DelayQueue, to schedule a message with a cut-off period. After the cut-off period the message is dequeued and Falcon checks for changes in the feed data, which is recorded in HDFS in a latedata file by Falcon's "record-size" action. If it detects any changes, the workflow is rerun with the new set of feed data.
-
-*Example:*
-For a process entity, the late rerun policy can be configured in the process definition.
-Falcon supports 3 policies, periodic, exp-backoff and final.
-Delay specifies how often the feed data should be checked for changes. One also needs to
-explicitly set, in late-input, the feed names which need to be checked for late data.
-<verbatim>
-  <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="impression" workflow-path="hdfs://impression/late/workflow" />
-        <late-input input="clicks" workflow-path="hdfs://clicks/late/workflow" />
-   </late-process>
-</verbatim>
-
-*NOTE:* Feeds configured with table storage do not support late input data handling at this point. This will be
-made available in the near future.
-
-For a feed entity replication job, the default late data handling policy can be configured in the runtime.properties file.
-Since these properties are runtime.properties, they will take effect for all replication jobs completed subsequent to the change.
-<verbatim>
-  # Default configs to handle replication for late arriving feeds.
-  *.feed.late.allowed=true
-  *.feed.late.frequency=hours(3)
-  *.feed.late.policy=exp-backoff
-</verbatim>
-
-
----++ Idempotency
-All the operations in Falcon are idempotent. That is, if you make the same request to the Falcon server/prism again, you will get a SUCCESSFUL return if it was SUCCESSFUL in the first attempt. For example, you submit a new process/feed and get a SUCCESSFUL message in return. Now if you run the same command/API request on the same entity, you will again get a SUCCESSFUL message. The same is true for other operations like schedule, kill, suspend and resume.
-Idempotency also takes care of the condition when a request is sent through prism and fails on one or more servers. For example, prism is configured to send requests to 3 servers. First the user sends a request to SUBMIT a process on all 3 of them, and receives a SUCCESSFUL response from all of them. Then, due to some issue, one of the servers goes down, and the user sends a request to schedule the submitted process. This time the user will receive a response with PARTIAL status and a FAILURE message from the server that has gone down. If the user checks, the process will have been started and be running on the 2 SUCCESSFUL servers. Now the issue with the server is figured out and it is brought back up. Sending the SCHEDULE request again through prism will result in a SUCCESSFUL response from prism as well as all three servers, but this time the PROCESS will be SCHEDULED only on the server which had failed earlier, and the other two will keep running as before.
- 
-
----++ Falcon EL Expressions
-
-
-The Falcon expression language can be used in a process definition to give the start and end instances for the various feeds.
-
-Before going into how to use Falcon EL expressions, it is necessary to understand what instance and instance start time refer to with respect to Falcon.
-
-Let's consider the part of a process definition below:
-
-<verbatim>
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<process name="testProcess">
-    <clusters>
-        <cluster name="corp">
-            <validity start="2010-01-02T01:00Z" end="2011-01-03T03:00Z" />
-        </cluster>
-    </clusters>
-   <parallel>2</parallel>
-   <order>LIFO</order>
-   <timeout>hours(3)</timeout>
-   <frequency>minutes(30)</frequency>
-
-  <inputs>
- <input end-instance="now(0,20)" start-instance="now(0,-60)"
-			feed="input-log" name="inputData"/>
- </inputs>
-<outputs>
-	<output instance="now(0,0)" feed="output-log"
-		name="outputData" />
-</outputs>
-...
-...
-...
-...
-</process>
-</verbatim>
-
-
-The above definition says that the process will start on the 2nd of Jan 2010 at 1 am and will end on the 3rd of Jan 2011 at 3 am on cluster corp. The process will also start a user-defined workflow (which we will call an instance) every 30 mins.
-
-This means that, starting at 2010-01-02T01:00Z, every 30 mins an instance will start and run the user-defined workflow. Now if this workflow needs some input data and produces some output, the user needs to give that in the <inputs> and <outputs> tags.
-Since the inputs that the process takes can be distributed over a wide range, we set the limits by giving the "start" and "end" instances for an input. Output is a single location, so only one instance is given.
-The timeout specifies how long a given instance should wait for input data before being terminated by the workflow engine.
-
-Coming back to instance start time: since an instance will start every 30 mins starting at 2010-01-02T01:00Z, the time it is scheduled to start is called its instance time. For example, the first few instance times for the above example are:
-
-
-<pre>Instance Number      instance start Time</pre>
-
-<pre>1			 2010-01-02T01:00Z</pre>
-<pre>2			 2010-01-02T01:30Z</pre>
-<pre>3			 2010-01-02T02:00Z</pre>
-<pre>4			 2010-01-02T02:30Z</pre>
-<pre>.				.</pre>
-<pre>.				.</pre>
-<pre>.				.</pre>
-<pre>.				.</pre>
-
-Now let's go to how to use the expression language. The only thing to keep in mind is that all EL evaluation is done based on the start time of that instance, and every instance will have different inputs/outputs based on the feed instances given in the process definition.
-
-All the parameters in the various ELs can be positive, zero or negative. Positive values indicate so many units in the future, zero means the base time the EL has been resolved to, and negative values indicate the corresponding units in the past.
-
-__Note: if no instance is created at the resolved time, then the instance immediately before it is considered.__
-
-Falcon currently supports the following ELs:
-
-
-   * 1.	*now(hours,minutes)*: now refers to the instance start time. The hours and minutes given are relative to the start time of the instance. For example, now(-2,40) corresponds to the feed instance at -2 hours and +40 minutes, i.e. the feed instance 80 mins before the instance start time. If the user had given now(0,-80) it would have corresponded to the same instance.
-   * 2.	*today(hours,minutes)*: the hours and minutes given in this EL correspond to an instance relative to the start of the day of the instance start time. I.e. if the instance start is at 2010-01-02T01:30Z, then today(-3,-20) will mean the instance created at 2010-01-01T20:40Z and today(3,20) will correspond to 2010-01-02T03:20Z.
-
-   * 3.	*yesterday(hours,minutes)*: as the name suggests, the EL yesterday picks up feed instances with respect to the start of the day yesterday. Hours and minutes are added to the 00 hours starting yesterday. Example: yesterday(24,30) will actually correspond to 00:30 am of today; for 2010-01-02T01:30Z this would mean the 2010-01-02T00:30Z feed.
-
-   * 4.	*currentMonth(day,hour,minute)*: currentMonth takes the reference to the start of the month with respect to the instance start time. One thing to keep in mind is that day is added to the first day of the month, so the value of day is the number of days you want to add to the first day of the month. For example: for instance start time 2010-01-12T01:30Z, the EL currentMonth(3,2,40) will correspond to the feed created at 2010-01-04T02:40Z, and currentMonth(0,0,0) will mean 2010-01-01T00:00Z.
-
-   * 5.	*lastMonth(day,hour,minute)*: the parameters for lastMonth are the same as for currentMonth, the only difference being that the reference is shifted one month back. For instance start 2010-01-12T01:30Z, lastMonth(2,3,30) will correspond to the feed instance at 2009-12-03T03:30Z.
-
-   * 6.	*currentYear(month,day,hour,minute)*: the month, day, hour and minute in the parameters are added with reference to the start of the year of the instance start time. For our example start time 2010-01-02T00:30Z, the reference will go back to 2010-01-01T00:00Z. Also, similar to days, months are added to the first month, that is, January. So currentYear(0,2,2,20) will mean 2010-01-03T02:20Z while currentYear(11,2,2,20) will mean 2010-12-03T02:20Z.
-
-
-   * 7.	*lastYear(month,day,hour,minute)*: this is exactly similar to currentYear in usage, the only difference being that the reference is taken to the start of the previous year. For example: lastYear(4,2,2,20) will correspond to the feed instance created at 2009-05-03T02:20Z and lastYear(12,2,2,20) will correspond to the feed at 2010-01-03T02:20Z.
-   
-   * 8. *latest(number of latest instance)*: this will simply make your input consider the given number of latest available instances of the feed. For example: latest(0) will consider the last available instance of the feed, whereas latest(-1) will consider the second last available instance and latest(-3) will consider the 4th last available instance.
-   
-   * 9.	*currentWeek(weekDayName,hour,minute)*: This is similar to currentMonth in the sense that it returns a relative time with respect to the instance start time, considering the day name provided as input as the start of the week. The day names can be one of SUN, MON, TUE, WED, THU, FRI, SAT.
-
-   * 10. *lastWeek(weekDayName,hour,minute)*: This is typically 7 days less than what the currentWeek returns for similar parameters.
-
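-As an illustrative fragment tying these ELs back to a process definition (the feed and input names are hypothetical), an input that consumes all instances from the start of the current day up to the instance time could be written as:
-<verbatim>
-<input name="dailyInput" feed="input-log" start-instance="today(0,0)" end-instance="now(0,0)"/>
-</verbatim>
-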
-
----++ Lineage
-
-Falcon adds the ability to capture lineage for both entities and their associated instances. It
-also captures the metadata tags associated with each of the entities as relationships. The
-following relationships are captured:
-
-   * owner of entities - User
-   * data classification tags
-   * groups defined in feeds
-   * Relationships between entities
-      * Clusters associated with Feed and Process entity
-      * Input and Output feeds for a Process
-   * Instances refer to corresponding entities
-
-Lineage is exposed in 3 ways:
-
-   * REST API
-   * CLI
-   * Dashboard - Interactive lineage for Process instances
-
-This feature is enabled by default but could be disabled by removing the following from:
-<verbatim>
-config name: *.application.services
-config value: org.apache.falcon.metadata.MetadataMappingService
-</verbatim>
-
-Lineage is only captured for Process executions. A future release will capture lineage for
-lifecycle policies such as replication and retention.
-
----++Security
-
-Security is detailed in [[Security][Security]].
-
----++ Recipes
-
-Recipes is detailed in [[Recipes][Recipes]].
-
----++ Monitoring
-
-Monitoring and Operationalizing Falcon is detailed in [[Operability][Operability]].
-
----++ Email Notification
-Notification for instance completion in Falcon is defined in [[FalconEmailNotification][Falcon Email Notification]].
-
----++ Backwards Compatibility
-
-Backwards compatibility instructions are [[Compatibility][detailed here.]]
-
----++ Proxyuser support
-Falcon supports impersonation or proxyuser functionality (identical to Hadoop proxyuser capabilities and conceptually
-similar to Unix 'sudo').
-
-Proxyuser enables Falcon clients to submit entities on behalf of other users. Falcon will utilize Hadoop core's hadoop-auth
-module to implement this functionality.
-
-Because proxyuser is a powerful capability, Falcon provides the following restriction capabilities (similar to Hadoop):
-
-   * Proxyuser is an explicit configuration on a per proxyuser basis.
-   * A proxyuser user can be restricted to impersonate other users from a set of hosts.
-   * A proxyuser user can be restricted to impersonate users belonging to a set of groups.
-
-There are 2 configuration properties needed in runtime properties to set up a proxyuser:
-   * falcon.service.ProxyUserService.proxyuser.#USER#.hosts: hosts from where the user #USER# can impersonate other users.
-   * falcon.service.ProxyUserService.proxyuser.#USER#.groups: groups the users being impersonated by user #USER# must belong to.
-
-If these configurations are not present, impersonation will not be allowed and the connection will fail. If more lax security is preferred,
-the wildcard value * may be used to allow impersonation from any host or of any user, although this is recommended only for testing/development.
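-
-As an illustration (assuming the *. prefix used in the runtime.properties examples elsewhere in this document, and a hypothetical proxy user named "falcon-ui"), the entries could look like:
-<verbatim>
-*.falcon.service.ProxyUserService.proxyuser.falcon-ui.hosts=*
-*.falcon.service.ProxyUserService.proxyuser.falcon-ui.groups=*
-</verbatim>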
-
-The -doAs option via the CLI, or the doAs query parameter when using the API, can be used to enable impersonation.
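-
-For illustration only (the file path and user name are hypothetical; see the Falcon CLI documentation for the exact syntax), submitting an entity on behalf of another user could look like:
-<verbatim>
-falcon entity -type process -file /path/to/process.xml -submit -doAs joe
-</verbatim>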
-
----++ ImportExport
-
-Data Import and Export is detailed in [[ImportExport][Data Import and Export]].
-
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/FalconEmailNotification.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/FalconEmailNotification.twiki b/docs/src/site/twiki/FalconEmailNotification.twiki
deleted file mode 100644
index 25abdd2..0000000
--- a/docs/src/site/twiki/FalconEmailNotification.twiki
+++ /dev/null
@@ -1,29 +0,0 @@
----++Falcon Email Notification
-
-Falcon Email notification allows sending email notifications when scheduled feed/process instances complete.
-Email notification in feed/process entity can be defined as follows:
-<verbatim>
-<process name="[process name]">
-    ...
-    <notification type="email" to="bob@xyz.com,tom@xyz.com"/>
-    ...
-</process>
-</verbatim>
-
-   *  *type*    - specifies the type of notification. *Note:* Currently only the "email" notification type is supported.
-   *  *to*  - specifies the address to send notifications to; multiple recipients may be provided as a comma-separated list.
-
-
-Falcon email notification requires some SMTP server configuration to be defined in startup.properties. Following are the values
-it looks for:
-   * *falcon.email.smtp.host*   - The host where the email action may find the SMTP server (localhost by default).
-   * *falcon.email.smtp.port*   - The port to connect to for the SMTP server (25 by default).
-   * *falcon.email.from.address*    - The from address to be used for mailing all emails (falcon@localhost by default).
-   * *falcon.email.smtp.auth*   - Boolean property that specifies if authentication is to be done or not. (false by default).
-   * *falcon.email.smtp.user*   - If authentication is enabled, the username to login as (empty by default).
-   * *falcon.email.smtp.password*   - If authentication is enabled, the username's password (empty by default).
-
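-As an illustration (assuming the *. prefix used for other startup properties; the host and addresses are made up), the corresponding startup.properties entries could look like:
-<verbatim>
-*.falcon.email.smtp.host=smtp.example.com
-*.falcon.email.smtp.port=25
-*.falcon.email.from.address=falcon@example.com
-*.falcon.email.smtp.auth=false
-</verbatim>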
-
-
-Also ensure that email notification plugin is enabled in startup.properties to send email notifications:
-   * *monitoring.plugins*   - org.apache.falcon.plugin.EmailNotificationPlugin,org.apache.falcon.plugin.DefaultMonitoringPlugin
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/FalconNativeScheduler.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/FalconNativeScheduler.twiki b/docs/src/site/twiki/FalconNativeScheduler.twiki
deleted file mode 100644
index 9ffc5e9..0000000
--- a/docs/src/site/twiki/FalconNativeScheduler.twiki
+++ /dev/null
@@ -1,213 +0,0 @@
----+ Falcon Native Scheduler
-
----++ Overview
-Falcon has been using Oozie as its scheduling engine.  While the use of Oozie works reasonably well, there are scenarios where Oozie scheduling is proving to be a limiting factor. In its current form, Falcon relies on Oozie for both scheduling and for workflow execution, due to which the scheduling is limited to time based/cron based scheduling with additional gating conditions on data availability. Also, this imposes restrictions on datasets being periodic in nature. In order to offer better scheduling capabilities, Falcon comes with its own native scheduler. 
-
----++ Capabilities
-The native scheduler will offer the capabilities offered by the Oozie coordinator and more. The native scheduler will be built and released over the next few releases of Falcon, giving users an opportunity to use it and provide feedback.
-
-Currently, the native scheduler offers the following capabilities:
-   1. Submit and schedule a Falcon process that runs periodically (without data dependency) - It could be a Pig script, an Oozie workflow or a Hive script (all the engine types currently supported).
-   1. Monitor/Query/Modify the scheduled process - All applicable entity APIs and instance APIs should work as they do now.
-
-*NOTE: Execution order is FIFO. LIFO and LAST_ONLY are not supported yet.*
-
-In the near future, the Falcon scheduler will provide feature parity with the Oozie scheduler, and in subsequent releases will provide the following features:
-   * Periodic, cron-based, calendar-based scheduling.
-   * Data availability based scheduling.
-   * External trigger/notification based scheduling.
-   * Support for periodic/a-periodic datasets.
-   * Support for optional/mandatory datasets. Option to specify minimum/maximum/exactly-N instances of data to consume.
-   * Handle dependencies across entities during re-run.
-
----++ Configuring Native Scheduler
-You can enable native scheduler by making changes to __$FALCON_HOME/conf/startup.properties__ as follows. You will need to restart Falcon Server for the changes to take effect.
-<verbatim>
-*.dag.engine.impl=org.apache.falcon.workflow.engine.OozieDAGEngine
-*.application.services=org.apache.falcon.security.AuthenticationInitializationService,\
-                        org.apache.falcon.workflow.WorkflowJobEndNotificationService, \
-                        org.apache.falcon.service.ProcessSubscriberService,\
-                        org.apache.falcon.service.FeedSLAMonitoringService,\
-                        org.apache.falcon.service.LifecyclePolicyMap,\
-                        org.apache.falcon.state.store.service.FalconJPAService,\
-                        org.apache.falcon.entity.store.ConfigurationStore,\
-                        org.apache.falcon.rerun.service.RetryService,\
-                        org.apache.falcon.rerun.service.LateRunService,\
-                        org.apache.falcon.metadata.MetadataMappingService,\
-                        org.apache.falcon.service.LogCleanupService,\
-                        org.apache.falcon.service.GroupsService,\
-                        org.apache.falcon.service.ProxyUserService,\
-                        org.apache.falcon.notification.service.impl.JobCompletionService,\
-                        org.apache.falcon.notification.service.impl.SchedulerService,\
-                        org.apache.falcon.notification.service.impl.AlarmService,\
-                        org.apache.falcon.notification.service.impl.DataAvailabilityService,\
-                        org.apache.falcon.execution.FalconExecutionService
-</verbatim>
-
----+++ Making the Native Scheduler the default scheduler
-To ensure backward compatibility, even when the native scheduler is enabled, the default scheduler is still Oozie. This means entities are scheduled on the Oozie scheduler by default. Users will need to explicitly specify the scheduler as native if they wish to schedule entities using the native scheduler.
-
-<a href="#Scheduling_new_entities_on_Native_Scheduler">This section</a> has more details on how to schedule on either of the schedulers. 
-
-If you wish to make the Falcon Native Scheduler your default scheduler and remove Oozie as the scheduler, set the following property in __$FALCON_HOME/conf/startup.properties__:
-<verbatim>
-## If you wish to use Falcon native scheduler as your default scheduler, set the workflow engine to FalconWorkflowEngine instead of OozieWorkflowEngine. ##
-*.workflow.engine.impl=org.apache.falcon.workflow.engine.FalconWorkflowEngine
-</verbatim>
-
----+++ Configuring the state store for Native Scheduler
-You can configure the state store by making changes to __$FALCON_HOME/conf/statestore.properties__ as follows. You will need to restart Falcon Server for the changes to take effect.
-
-Falcon Server needs to maintain the state of entities and instances in a persistent store for the system to be recoverable. Since Prism only federates, it does not need to maintain any state information. The following properties need to be set in statestore.properties of the Falcon Servers:
-<verbatim>
-######### StateStore Properties #####
-*.falcon.state.store.impl=org.apache.falcon.state.store.jdbc.JDBCStateStore
-*.falcon.statestore.jdbc.driver=org.apache.derby.jdbc.EmbeddedDriver
-*.falcon.statestore.jdbc.url=jdbc:derby:data/falcon.db
-# StateStore credentials file where username,password and other properties can be stored securely.
-# Set this credentials file permission 400 and make sure user who starts falcon should only have read permission.
-# Give Absolute path to credentials file along with file name or put in classpath with file name statestore.credentials.
-# Credentials file should be present either in given location or class path, otherwise falcon won't start.
-*.falcon.statestore.credentials.file=
-*.falcon.statestore.jdbc.username=sa
-*.falcon.statestore.jdbc.password=
-*.falcon.statestore.connection.data.source=org.apache.commons.dbcp.BasicDataSource
-# Maximum number of active connections that can be allocated from this pool at the same time.
-*.falcon.statestore.pool.max.active.conn=10
-*.falcon.statestore.connection.properties=
-# Indicates the interval (in milliseconds) between eviction runs.
-*.falcon.statestore.validate.db.connection.eviction.interval=300000
-## The number of objects to examine during each run of the idle object evictor thread.
-*.falcon.statestore.validate.db.connection.eviction.num=10
-## Creates Falcon DB.
-## If set to true, it creates the DB schema if it does not exist. If the DB schema exists, it is a NOP.
-## If set to false, it does not create the DB schema. If the DB schema does not exist, start-up fails.
-*.falcon.statestore.create.db.schema=true
-</verbatim> 
-
-The _*.falcon.statestore.jdbc.url_ property in statestore.properties determines the DB and data location. All other properties are common across RDBMS.
-
-*NOTE : Although multiple Falcon Servers can share a DB (not applicable for Derby DB), it is recommended that you have different DBs for different Falcon Servers for better performance.*
-
-You will need to create the state DB and tables before starting the Falcon Server. To create tables, a tool comes bundled with the Falcon installation. You can use the _falcon-db.sh_ script to create tables in the DB. The script needs to be run only for Falcon Servers and can be run by any user that has execute permission on the script. The script picks up the DB connection details from __$FALCON_HOME/conf/statestore.properties__. Ensure that you have granted the right privileges to the user mentioned in statestore.properties, so the tables can be created.
-
-You can use the help command to get details on the sub-commands supported:
-<verbatim>
-./bin/falcon-db.sh help
-Hadoop home is set, adding libraries from '/Users/pallavi.rao/falcon/hadoop-2.6.0/bin/hadoop classpath' into falcon classpath
-usage: 
-      Falcon DB initialization tool currently supports Derby DB/ Mysql
-
-      falcondb help : Display usage for all commands or specified command
-
-      falcondb version : Show Falcon DB version information
-
-      falcondb create <OPTIONS> : Create Falcon DB schema
-                      -run             Confirmation option regarding DB schema creation/upgrade
-                      -sqlfile <arg>   Generate SQL script instead of creating/upgrading the DB
-                                       schema
-
-      falcondb upgrade <OPTIONS> : Upgrade Falcon DB schema
-                       -run             Confirmation option regarding DB schema creation/upgrade
-                       -sqlfile <arg>   Generate SQL script instead of creating/upgrading the DB
-                                        schema
-
-</verbatim>
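-
-For example, once the connection details in statestore.properties are in place, the schema can be created with the _create_ sub-command shown in the usage above (a minimal sketch; output will vary with your setup):
-<verbatim>
-./bin/falcon-db.sh create -run
-</verbatim>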
-Currently, MySQL, PostgreSQL and Derby are supported as state stores. We may extend support to other DBs in the future. Falcon has been tested against MySQL v5.5 and PostgreSQL v9.5. If you are using MySQL, ensure you also copy mysql-connector-java-<version>.jar under __$FALCON_HOME/server/webapp/falcon/WEB-INF/lib__ and __$FALCON_HOME/client/lib__.
-
----++++ Using Derby as the State Store
-Using Derby is ideal for QA and staging setups. Falcon comes bundled with a Derby connector, and no explicit setup is required (although you can set it up) in terms of creating the DB or tables.
-For example,
- <verbatim> *.falcon.statestore.jdbc.url=jdbc:derby:data/falcon.db;create=true </verbatim>
-
- tells Falcon to use the Derby JDBC connector, with the data directory $FALCON_HOME/data/ and DB name 'falcon'. If _create=true_ is specified, you will not need to create a DB up front; a database will be created if it does not exist.
-
----++++ Using MySQL as the State Store
-The jdbc.url property in statestore.properties determines the DB and data location.
-For example,
- <verbatim> *.falcon.statestore.jdbc.url=jdbc:mysql://localhost:3306/falcon </verbatim>
-
- tells Falcon to use the MySQL JDBC connector, accessible at localhost:3306, with DB name 'falcon'.
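-
-A minimal sketch of the related statestore.properties entries for MySQL (the driver class, username and password shown here are illustrative assumptions):
-<verbatim>
-*.falcon.statestore.jdbc.driver=com.mysql.jdbc.Driver
-*.falcon.statestore.jdbc.url=jdbc:mysql://localhost:3306/falcon
-*.falcon.statestore.jdbc.username=falcon
-*.falcon.statestore.jdbc.password=falcon
-</verbatim>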
-
----++ Scheduling new entities on Native Scheduler
-To schedule an entity (currently only process is supported) using the native scheduler, you need to specify the scheduler in the schedule command as shown below:
-<verbatim>
-$FALCON_HOME/bin/falcon entity -type process -name <process name> -schedule -properties falcon.scheduler:native
-</verbatim>
-
-If Oozie is configured as the default scheduler, you can skip the scheduler option or explicitly set it to _oozie_, as shown below:
-<verbatim>
-$FALCON_HOME/bin/falcon entity -type process -name <process name> -schedule
-OR
-$FALCON_HOME/bin/falcon entity -type process -name <process name> -schedule -properties falcon.scheduler:oozie
-</verbatim>
-
-If the native scheduler is configured as the default scheduler, then you can omit the scheduler option, as shown below:
-<verbatim>
-$FALCON_HOME/bin/falcon entity -type process -name <process name> -schedule 
-</verbatim>
-
----++ Migrating entities from Oozie Scheduler to Native Scheduler
-Currently, users will have to delete and re-create entities in order to move across schedulers. Attempting to schedule an already scheduled entity on a different scheduler will result in an error. Note that the history of instances prior to scheduling on the native scheduler will not be available via the instance APIs. However, users can retrieve that information using the metadata APIs. The native scheduler must be enabled before migrating entities to it.
-
-<a href="#Configuring_Native_Scheduler">Configuring Native Scheduler</a> has more details on how to enable native scheduler.
-
----+++ Migrating from Oozie to Native Scheduler
-   * Delete the entity (process). 
-<verbatim>$FALCON_HOME/bin/falcon entity -type process -name <process name> -delete </verbatim>
-   * Submit the entity (process) with start time from where the Oozie scheduler left off. 
-<verbatim>$FALCON_HOME/bin/falcon entity -type process -submit <path to process xml> </verbatim>
-   * Schedule the entity on native scheduler. 
-<verbatim> $FALCON_HOME/bin/falcon entity -type process -name <process name> -schedule -properties falcon.scheduler:native </verbatim>
-
----+++ Reverting to Oozie from Native Scheduler
-   * Delete the entity (process). 
-<verbatim>$FALCON_HOME/bin/falcon entity -type process -name <process name> -delete </verbatim>
-   * Submit the entity (process) with start time from where the Native scheduler left off. 
-<verbatim>$FALCON_HOME/bin/falcon entity -type process -submit <path to process xml> </verbatim>
-   * Schedule the entity on the default scheduler (Oozie).
- <verbatim> $FALCON_HOME/bin/falcon entity -type process -name <process name> -schedule </verbatim>
-
----+++ Differences in API responses between Oozie and Native Scheduler
-Most API responses are similar whether the entity is scheduled via Oozie or via Native scheduler. However, there are a few exceptions and those are listed below.
----++++ Rerun API
-When a user performs a rerun using the Oozie scheduler, Falcon directly reruns the workflow on Oozie and the instance is moved to 'RUNNING'.
-
-Example response:
-<verbatim>
-$ falcon instance -rerun processMerlinOozie -start 2016-01-08T12:13Z -end 2016-01-08T12:15Z
-Consolidated Status: SUCCEEDED
-
-Instances:
-Instance		Cluster		SourceCluster		Status		Start		End		Details					Log
------------------------------------------------------------------------------------------------
-2016-01-08T12:13Z	ProcessMultipleClustersTest-corp-9706f068	-	RUNNING	2016-01-08T13:03Z	2016-01-08T13:03Z	-	http://8RPCG32.corp.inmobi.com:11000/oozie?job=0001811-160104160825636-oozie-oozi-W
-2016-01-08T12:13Z	ProcessMultipleClustersTest-corp-0b270a1d	-	RUNNING	2016-01-08T13:03Z	2016-01-08T13:03Z	-	http://lda01:11000/oozie?job=0002247-160104115615658-oozie-oozi-W
-
-Additional Information:
-Response: ua1/RERUN
-ua2/RERUN
-Request Id: ua1/871377866@qtp-630572412-35 - 7190c4c8-bacb-4639-8d48-c9e639f544da
-ua2/1554129706@qtp-536122141-13 - bc18127b-1bf8-4ea1-99e6-b1f10ba3a441
-</verbatim>
-
-However, when a user performs a rerun on the native scheduler, the instance is scheduled again. This is done intentionally so as not to exceed the limit on the number of instances running in parallel. Hence, the user will see the status of the instance as 'READY'.
-
-Example response:
-<verbatim>
-$ falcon instance -rerun ProcessMultipleClustersTest-agregator-coord16-8f55f59b -start 2016-01-08T12:13Z -end 2016-01-08T12:15Z
-Consolidated Status: SUCCEEDED
-
-Instances:
-Instance		Cluster		SourceCluster		Status		Start		End		Details					Log
------------------------------------------------------------------------------------------------
-2016-01-08T12:13Z	ProcessMultipleClustersTest-corp-9706f068	-	READY	2016-01-08T13:03Z	2016-01-08T13:03Z	-	http://8RPCG32.corp.inmobi.com:11000/oozie?job=0001812-160104160825636-oozie-oozi-W
-
-2016-01-08T12:13Z	ProcessMultipleClustersTest-corp-0b270a1d	-	READY	2016-01-08T13:03Z	2016-01-08T13:03Z	-	http://lda01:11000/oozie?job=0002248-160104115615658-oozie-oozi-W
-
-Additional Information:
-Response: ua1/RERUN
-ua2/RERUN
-Request Id: ua1/871377866@qtp-630572412-35 - 8d118d4d-c0ef-4335-a9af-10364498ec4f
-ua2/1554129706@qtp-536122141-13 - c2a3fc50-8b05-47ce-9c85-ca432b96d923
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/HDFSDR.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/HDFSDR.twiki b/docs/src/site/twiki/HDFSDR.twiki
deleted file mode 100644
index 1c1e3f5..0000000
--- a/docs/src/site/twiki/HDFSDR.twiki
+++ /dev/null
@@ -1,34 +0,0 @@
----+ HDFS DR Recipe
----++ Overview
-Falcon supports the HDFS DR recipe to replicate data from a source cluster to a destination cluster.
-
----++ Usage
----+++ Setup cluster definition.
-   <verbatim>
-    $FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml
-   </verbatim>
-
----+++ Update recipes properties
-   Copy the HDFS replication recipe properties, workflow and template files from $FALCON_HOME/data-mirroring/hdfs-replication to an accessible
-   directory path or to the recipe directory path (*falcon.recipe.path=<recipe directory path>*). *"falcon.recipe.path"* must be specified
-   in the Falcon conf client.properties (see the sketch below). Now update the copied recipe properties file with the required attributes to replicate data
-   from the source cluster to the destination cluster for HDFS DR.
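-
-   A minimal sketch of the corresponding client.properties entry (the directory path below is hypothetical):
-   <verbatim>
-    falcon.recipe.path=/apps/falcon/recipes/hdfs-replication
-   </verbatim>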
-
----+++ Submit HDFS DR recipe
-
-   After updating the recipe properties file with required attributes in directory path or in falcon.recipe.path,
-   there are two ways of submitting the HDFS DR recipe:
-
-   * 1. Specify the Falcon recipe properties file through the recipe command line.
-   <verbatim>
-    $FALCON_HOME/bin/falcon recipe -name hdfs-replication -operation HDFS_REPLICATION
-    -properties /cluster/hdfs-replication.properties
-   </verbatim>
-
-   * 2. Use the Falcon recipe path specified in the Falcon conf client.properties.
-   <verbatim>
-    $FALCON_HOME/bin/falcon recipe -name hdfs-replication -operation HDFS_REPLICATION
-   </verbatim>
-
-
-*Note:* The recipe properties file, workflow file and template file names must match the recipe name, must be unique, and must reside in the same directory.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/HiveDR.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/HiveDR.twiki b/docs/src/site/twiki/HiveDR.twiki
deleted file mode 100644
index a8f6aee..0000000
--- a/docs/src/site/twiki/HiveDR.twiki
+++ /dev/null
@@ -1,74 +0,0 @@
----+Hive Disaster Recovery
-
-
----++Overview
-Falcon provides a feature to replicate Hive metadata and data events from a source cluster
-to a destination cluster. This is supported for both secure and unsecure clusters through Falcon recipes.
-
-
----++Prerequisites
-Following are the prerequisites to use Hive DR:
-
-   * *Hive 1.2.0+*
-   * *Oozie 4.2.0+*
-
-*Note:* Set the following properties in hive-site.xml for replicating the Hive events on the source and destination Hive clusters:
-<verbatim>
-    <property>
-        <name>hive.metastore.event.listeners</name>
-        <value>org.apache.hive.hcatalog.listener.DbNotificationListener</value>
-        <description>event listeners that are notified of any metastore changes</description>
-    </property>
-
-    <property>
-        <name>hive.metastore.dml.events</name>
-        <value>true</value>
-    </property>
-</verbatim>
-
----++ Usage
----+++ Bootstrap
-   Perform an initial bootstrap of the tables and databases from the source cluster to the destination cluster.
-   * *Database Bootstrap*
-     For bootstrapping DB replication, the destination DB should be created first. This step is expected,
-     since DB replication definitions can be set up by users only on pre-existing DBs. Second, export all tables in
-     the source DB and import them into the destination DB, as described in Table Bootstrap.
-
-   * *Table Bootstrap*
-     For bootstrapping table replication, after having turned on the !DbNotificationListener
-     on the source DB, perform an export of the table, distcp the export over to the destination
-     warehouse and do an import over there (a sketch of these steps follows this list). Check the [[https://cwiki.apache.org/confluence/display/Hive/LanguageManual+ImportExport][Hive Export-Import]] documentation for syntax details
-     and examples.
-     This will set up the destination table so that the events on the source cluster that modify the table
-     will then be replicated.
-
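-   A minimal sketch of the table bootstrap steps (the database names, table name and export path below are hypothetical):
-   <verbatim>
-    # On the source cluster
-    hive -e "USE src_db; EXPORT TABLE customer_raw TO '/apps/hive/exports/customer_raw'"
-
-    # Copy the export to the destination cluster
-    hadoop distcp hdfs://source-nn:8020/apps/hive/exports/customer_raw hdfs://dest-nn:8020/apps/hive/exports/customer_raw
-
-    # On the destination cluster
-    hive -e "USE tgt_db; IMPORT TABLE customer_raw FROM '/apps/hive/exports/customer_raw'"
-   </verbatim>
-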
----+++ Setup cluster definition
-   <verbatim>
-    $FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml
-   </verbatim>
-
----+++ Update recipes properties
-   Copy the Hive DR recipe properties, workflow and template files from $FALCON_HOME/data-mirroring/hive-disaster-recovery to an accessible
-   directory path or to the recipe directory path (*falcon.recipe.path=<recipe directory path>*). *"falcon.recipe.path"* must be specified
-   in the Falcon conf client.properties. Now update the copied recipe properties file with the required attributes to replicate metadata and data
-   from the source cluster to the destination cluster for Hive DR.
-
----+++ Submit Hive DR recipe
-   After updating the recipe properties file with required attributes in directory path or in falcon.recipe.path,
-   there are two ways of submitting the Hive DR recipe:
-
-   * 1. Specify the Falcon recipe properties file through the recipe command line.
-   <verbatim>
-       $FALCON_HOME/bin/falcon recipe -name hive-disaster-recovery -operation HIVE_DISASTER_RECOVERY
-       -properties /cluster/hive-disaster-recovery.properties
-   </verbatim>
-
-   * 2. Use the Falcon recipe path specified in the Falcon conf client.properties.
-   <verbatim>
-       $FALCON_HOME/bin/falcon recipe -name hive-disaster-recovery -operation HIVE_DISASTER_RECOVERY
-   </verbatim>
-
-
-*Note:*
-   * The recipe properties file, workflow file and template file names must match the recipe name, must be unique, and must reside in the same directory.
-   * If Kerberos security is enabled on the cluster, use the secure templates for Hive DR from $FALCON_HOME/data-mirroring/hive-disaster-recovery.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/HiveIntegration.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/HiveIntegration.twiki b/docs/src/site/twiki/HiveIntegration.twiki
deleted file mode 100644
index 688305d..0000000
--- a/docs/src/site/twiki/HiveIntegration.twiki
+++ /dev/null
@@ -1,372 +0,0 @@
----+ Hive Integration
-
----++ Overview
-Falcon provides data management functions for feeds declaratively. It allows users to represent feed locations as
-time-based partition directories on HDFS containing files.
-
-Hive provides a simple and familiar database-like tabular model of data management to its users,
-which is backed by HDFS. It supports two classes of tables, managed tables and external tables.
-
-Falcon allows users to represent feed locations as Hive tables. Falcon supports both managed and external tables
-and provides data management services for tables such as replication, eviction, archival, etc. Falcon will notify
-HCatalog as a side effect of either acquiring, replicating or evicting a data set instance and adds the
-missing capability of HCatalog table replication.
-
-In the near future, Falcon will allow users to express pipeline processing in Hive scripts
-apart from Pig and Oozie workflows.
-
-
----++ Assumptions
-   * Date is a mandatory first-level partition for Hive tables
-      * Data availability triggers are based on date pattern in Oozie
-   * Tables must be created in Hive prior to adding them as a Feed in Falcon.
-      * Duplicating this in Falcon will create confusion on the real source of truth. Also propagating schema changes
-    between systems is a hard problem.
-   * Falcon does not know about the encoding of the data, and the data should be in an HCatalog-supported format.
-
----++ Configuration
-Falcon provides a system level option to enable Hive integration. Falcon must be configured with an implementation
-for the catalog registry. The default implementation for Hive is shipped with Falcon.
-
-<verbatim>
-catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
-</verbatim>
-
-
----++ Incompatible changes
-Falcon depends heavily on data-availability triggers for scheduling Falcon workflows. Oozie must support
-data-availability triggers based on HCatalog partition availability. This is only available in Oozie 4.x.
-
-Hence, Falcon's Hive support needs Oozie 4.x.
-
-
----++ Oozie Shared Library setup
-Falcon post Hive integration depends heavily on the [[http://oozie.apache.org/docs/4.0.1/WorkflowFunctionalSpec.html#a17_HDFS_Share_Libraries_for_Workflow_Applications_since_Oozie_2.3][shared library feature of Oozie]].
-Since the sheer number of jars for HCatalog, Pig and Hive runs into the many tens, it is quite daunting to
-redistribute the dependent jars from Falcon.
-
-[[http://oozie.apache.org/docs/4.0.1/DG_QuickStart.html#Oozie_Share_Lib_Installation][This is a one time effort in Oozie setup and is quite straightforward.]]
-
-
----++ Approach
-
----+++ Entity Changes
-
-   * Cluster DSL will have an additional registry-interface section, specifying the endpoint for the
-HCatalog server. If this is absent, no HCatalog publication will be done from Falcon for this cluster.
-      <verbatim>thrift://hcatalog-server:port</verbatim>
-   * Feed DSL will allow users to specify the URI (location) for HCatalog tables (an example follows this list) as:
-      <verbatim>catalog:database_name:table_name#partitions(key=value?)*</verbatim>
-   * Failure to publish to HCatalog will be retried (configurable # of retries) with back-off. Permanent failures
-   after all the retries are exhausted will fail the Falcon workflow.
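-
-For example, a feed mapped to an hourly partitioned table might use a URI like the one below (the database and table names are hypothetical; fuller examples appear in the Hive Examples section):
-<verbatim>
-catalog:logs_db:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}
-</verbatim>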
-
----+++ Eviction
-
-   * Falcon will construct DDL statements to filter candidate partitions eligible for eviction
-   * Falcon will construct DDL statements to drop the eligible partitions (a conceptual example follows this list)
-   * Additionally, Falcon will nuke the data on HDFS for external tables
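-
-Conceptually, the generated DDL for dropping an eligible partition resembles the statement below (the table name and partition value are hypothetical):
-<verbatim>
-ALTER TABLE customer_raw DROP PARTITION (ds='2013-09-24-00');
-</verbatim>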
-
-
----+++ Replication
-
-   * Falcon will use HCatalog (Hive) API to export the data for a given table and the partition,
-which will result in a data collection that includes metadata on the data's storage format, the schema,
-how the data is sorted, what table the data came from, and values of any partition keys from that table.
-   * Falcon will use the distcp tool to copy the exported data collection into the secondary cluster into a staging
-directory used by Falcon.
-   * Falcon will then import the data into HCatalog (Hive) using the HCatalog (Hive) API. If the specified table does
-not yet exist, Falcon will create it, using the information in the imported metadata to set defaults for the
-table such as schema, storage format, etc.
-   * The partition is not complete and hence not visible to users until all the data is committed on the secondary
-cluster (no dirty reads).
-   * The data collection is staged by Falcon, and retries for the copy continue from where it left off.
-   * Failure to register with Hive will be retried. After all the attempts are exhausted,
-the data will be cleaned up by Falcon.
-
-
----+++ Security
-The user owns all data managed by Falcon. Falcon runs as the user who submitted the feed. Falcon will authenticate
-with HCatalog as the end user who owns the entity and the data.
-
-For Hive managed tables, the table may be owned by the end user or "hive". For "hive" owned tables,
-the user will have to configure the feed as "hive".
-
-
----++ Load on HCatalog from Falcon
-It generally depends on the frequency of the feeds configured in Falcon and how often data is ingested, replicated,
-or processed.
-
-
----++ User Impact
-   * There should not be any impact to users due to this integration.
-   * Falcon will be fully backwards compatible 
-   * Users have a choice to either choose storage based on files on HDFS as they do today or use HCatalog for
-accessing the data in tables
-
-
----++ Known Limitations
-
----+++ Oozie
-
-   * Falcon with Hadoop 1.x requires copying the guava jars manually to the sharelib in Oozie. Hadoop 2.x ships this.
-   * hcatalog-pig-adapter needs to be copied manually to the Oozie sharelib.
-<verbatim>
-bin/hadoop dfs -copyFromLocal $LFS/share/lib/hcatalog/hcatalog-pig-adapter-0.5.0-incubating.jar share/lib/hcatalog
-</verbatim>
-   * Oozie 4.x with Hadoop 2.x
-Replication jobs are submitted to Oozie on the destination cluster. Oozie runs a table export job
-on the RM on the source cluster. The Oozie server on the target cluster must be configured with the source Hadoop
-configs, else jobs fail on both secure and non-secure clusters with errors such as the one below:
-<verbatim>
-org.apache.hadoop.security.token.SecretManager$InvalidToken: Password not found for ApplicationAttempt appattempt_1395965672651_0010_000002
-</verbatim>
-
-Make sure all Oozie servers that Falcon talks to have the Hadoop configs configured in oozie-site.xml:
-<verbatim>
-<property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf,arpit-new-falcon-1.cs1cloud.internal:8020=/etc/hadoop-1,arpit-new-falcon-1.cs1cloud.internal:8032=/etc/hadoop-1,arpit-new-falcon-2.cs1cloud.internal:8020=/etc/hadoop-2,arpit-new-falcon-2.cs1cloud.internal:8032=/etc/hadoop-2,arpit-new-falcon-5.cs1cloud.internal:8020=/etc/hadoop-3,arpit-new-falcon-5.cs1cloud.internal:8032=/etc/hadoop-3</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative is looked within
-          the Oozie configuration directory; though the path can be absolute (i.e. to point
-          to Hadoop client conf/ directories in the local filesystem.
-      </description>
-    </property>
-</verbatim>
-
----+++ Hive
-
-   * Dated Partitions
-Falcon does not work well when a table partition contains multiple dated columns. Falcon only works
-with a single dated partition. This is being tracked in FALCON-357, which is a limitation in Oozie.
-<verbatim>
-catalog:default:table4#year=${YEAR};month=${MONTH};day=${DAY};hour=${HOUR};minute=${MINUTE}
-</verbatim>
-
-   * [[https://issues.apache.org/jira/browse/HIVE-5550][Hive table import fails for tables created with default text and sequence file formats using HCatalog API]]
-For some arcane reason, Hive substitutes the output format for text and sequence files with one prefixed with Hive.
-Hive table import fails since it compares against the input and output formats of the source table and they are
-different. Say a table was created without specifying the file format; it defaults to:
-<verbatim>
-fileFormat=TextFile, inputformat=org.apache.hadoop.mapred.TextInputFormat, outputformat=org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-</verbatim>
-
-But when Hive fetches the table from the metastore, it replaces the output format with org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat,
-and the comparison between the source and target tables fails.
-<verbatim>
-org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer#checkTable
-      // check IF/OF/Serde
-      String existingifc = table.getInputFormatClass().getName();
-      String importedifc = tableDesc.getInputFormat();
-      String existingofc = table.getOutputFormatClass().getName();
-      String importedofc = tableDesc.getOutputFormat();
-      if ((!existingifc.equals(importedifc))
-          || (!existingofc.equals(importedofc))) {
-        throw new SemanticException(
-            ErrorMsg.INCOMPATIBLE_SCHEMA
-                .getMsg(" Table inputformat/outputformats do not match"));
-      }
-</verbatim>
-The above is not an issue with Hive 0.13.
-
----++ Hive Examples
-Following is an example entity configuration for lifecycle management functions for tables in Hive.
-
----+++ Hive Table Lifecycle Management - Replication and Retention
-
----++++ Primary Cluster
-
-<verbatim>
-<?xml version="1.0"?>
-<!--
-    Primary cluster configuration for demo vm
-  -->
-<cluster colo="west-coast" description="Primary Cluster"
-         name="primary-cluster"
-         xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:10070"
-                   version="1.1.1" />
-        <interface type="write" endpoint="hdfs://localhost:10020"
-                   version="1.1.1" />
-        <interface type="execute" endpoint="localhost:10300"
-                   version="1.1.1" />
-        <interface type="workflow" endpoint="http://localhost:11010/oozie/"
-                   version="4.0.1" />
-        <interface type="registry" endpoint="thrift://localhost:19083"
-                   version="0.11.0" />
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
-                   version="5.4.3" />
-    </interfaces>
-    <locations>
-        <location name="staging" path="/apps/falcon/staging" />
-        <location name="temp" path="/tmp" />
-        <location name="working" path="/apps/falcon/working" />
-    </locations>
-</cluster>
-</verbatim>
-
----++++ BCP Cluster
-
-<verbatim>
-<?xml version="1.0"?>
-<!--
-    BCP cluster configuration for demo vm
-  -->
-<cluster colo="east-coast" description="BCP Cluster"
-         name="bcp-cluster"
-         xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://localhost:20070"
-                   version="1.1.1" />
-        <interface type="write" endpoint="hdfs://localhost:20020"
-                   version="1.1.1" />
-        <interface type="execute" endpoint="localhost:20300"
-                   version="1.1.1" />
-        <interface type="workflow" endpoint="http://localhost:11020/oozie/"
-                   version="4.0.1" />
-        <interface type="registry" endpoint="thrift://localhost:29083"
-                   version="0.11.0" />
-        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
-                   version="5.4.3" />
-    </interfaces>
-    <locations>
-        <location name="staging" path="/apps/falcon/staging" />
-        <location name="temp" path="/tmp" />
-        <location name="working" path="/apps/falcon/working" />
-    </locations>
-</cluster>
-</verbatim>
-
----++++ Feed with replication and eviction policy
-
-<verbatim>
-<?xml version="1.0"?>
-<!--
-    Replicating Hourly customer table from primary to secondary cluster.
-  -->
-<feed description="Replicating customer table feed" name="customer-table-replicating-feed"
-      xmlns="uri:falcon:feed:0.1">
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <clusters>
-        <cluster name="primary-cluster" type="source">
-            <validity start="2013-09-24T00:00Z" end="2013-10-26T00:00Z"/>
-            <retention limit="hours(2)" action="delete"/>
-        </cluster>
-        <cluster name="bcp-cluster" type="target">
-            <validity start="2013-09-24T00:00Z" end="2013-10-26T00:00Z"/>
-            <retention limit="days(30)" action="delete"/>
-
-            <table uri="catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-        </cluster>
-    </clusters>
-
-    <table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-
-    <ACL owner="seetharam" group="users" permission="0755"/>
-    <schema location="" provider="hcatalog"/>
-</feed>
-</verbatim>
-
-
----+++ Hive Table used in Processing Pipelines
-
----++++ Primary Cluster
-The cluster definition from the lifecycle example can be used.
-
----++++ Input Feed
-
-<verbatim>
-<?xml version="1.0"?>
-<feed description="clicks log table " name="input-table" xmlns="uri:falcon:feed:0.1">
-    <groups>online,bi</groups>
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <clusters>
-        <cluster name="##cluster##" type="source">
-            <validity start="2010-01-01T00:00Z" end="2012-04-21T00:00Z"/>
-            <retention limit="hours(24)" action="delete"/>
-        </cluster>
-    </clusters>
-
-    <table uri="catalog:falcon_db:input_table#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>
-</verbatim>
-
-
----++++ Output Feed
-
-<verbatim>
-<?xml version="1.0"?>
-<feed description="clicks log identity table" name="output-table" xmlns="uri:falcon:feed:0.1">
-    <groups>online,bi</groups>
-    <frequency>hours(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <clusters>
-        <cluster name="##cluster##" type="source">
-            <validity start="2010-01-01T00:00Z" end="2012-04-21T00:00Z"/>
-            <retention limit="hours(24)" action="delete"/>
-        </cluster>
-    </clusters>
-
-    <table uri="catalog:falcon_db:output_table#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
-
-    <ACL owner="testuser" group="group" permission="0x755"/>
-    <schema location="/schema/clicks" provider="protobuf"/>
-</feed>
-</verbatim>
-
-
----++++ Process
-
-<verbatim>
-<?xml version="1.0"?>
-<process name="##processName##" xmlns="uri:falcon:process:0.1">
-    <clusters>
-        <cluster name="##cluster##">
-            <validity end="2012-04-22T00:00Z" start="2012-04-21T00:00Z"/>
-        </cluster>
-    </clusters>
-
-    <parallel>1</parallel>
-    <order>FIFO</order>
-    <frequency>days(1)</frequency>
-    <timezone>UTC</timezone>
-
-    <inputs>
-        <input end="today(0,0)" start="today(0,0)" feed="input-table" name="input"/>
-    </inputs>
-
-    <outputs>
-        <output instance="now(0,0)" feed="output-table" name="output"/>
-    </outputs>
-
-    <properties>
-        <property name="blah" value="blah"/>
-    </properties>
-
-    <workflow engine="pig" path="/falcon/test/apps/pig/table-id.pig"/>
-
-    <retry policy="periodic" delay="minutes(10)" attempts="3"/>
-</process>
-</verbatim>
-
-
----++++ Pig Script
-
-<verbatim>
-A = load '$input_database.$input_table' using org.apache.hcatalog.pig.HCatLoader();
-B = FILTER A BY $input_filter;
-C = foreach B generate id, value;
-store C into '$output_database.$output_table' USING org.apache.hcatalog.pig.HCatStorer('$output_dataout_partitions');
-</verbatim>


[06/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceStatusTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceStatusTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceStatusTest.java
deleted file mode 100644
index 514fd10..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceStatusTest.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-
-/**
- * Feed instance status tests.
- */
-@Test(groups = "embedded")
-public class FeedInstanceStatusTest extends BaseTestClass {
-
-    private String baseTestDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestDir + MINUTE_DATE_PATTERN;
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-
-    private ColoHelper cluster2 = servers.get(1);
-    private ColoHelper cluster3 = servers.get(2);
-    private FileSystem cluster2FS = serverFS.get(1);
-    private FileSystem cluster3FS = serverFS.get(2);
-    private static final Logger LOGGER = Logger.getLogger(FeedInstanceStatusTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        Bundle bundle = BundleUtil.readELBundle();
-        for (int i = 0; i < 3; i++) {
-            bundles[i] = new Bundle(bundle, servers.get(i));
-            bundles[i].generateUniqueBundle(this);
-            bundles[i].setProcessWorkflow(aggregateWorkflowDir);
-        }
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Goes through the whole feed replication workflow, checking its instance statuses while
-     * submitting the feed, scheduling it, and performing different combinations of actions like
-     * -submit, -resume, -kill, -rerun.
-     */
-    @Test(groups = {"multiCluster"})
-    public void feedInstanceStatusRunning() throws Exception {
-        bundles[0].setInputFeedDataPath(feedInputPath);
-
-        AssertUtil.assertSucceeded(prism.getClusterHelper()
-            .submitEntity(bundles[0].getClusters().get(0)));
-
-        AssertUtil.assertSucceeded(prism.getClusterHelper()
-            .submitEntity(bundles[1].getClusters().get(0)));
-
-        AssertUtil.assertSucceeded(prism.getClusterHelper()
-            .submitEntity(bundles[2].getClusters().get(0)));
-
-        String feed = bundles[0].getDataSets().get(0);
-        String feedName = Util.readEntityName(feed);
-        feed = FeedMerlin.fromString(feed).clearFeedClusters().toString();
-        String startTime = TimeUtil.getTimeWrtSystemTime(-50);
-        final String startPlus20Min = TimeUtil.addMinsToTime(startTime, 20);
-        final String startPlus40Min = TimeUtil.addMinsToTime(startTime, 40);
-        final String startPlus100Min = TimeUtil.addMinsToTime(startTime, 100);
-
-        feed = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(
-                Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("US/${cluster.colo}")
-                .build())
-            .toString();
-        feed = FeedMerlin.fromString(feed).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startPlus20Min,
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feed = FeedMerlin.fromString(feed).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[2].getClusters().get(0)))
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startPlus40Min,
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("feed: " + Util.prettyPrintXml(feed));
-
-        //status before submit
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus100Min
-                + "&end=" + TimeUtil.addMinsToTime(startTime, 120));
-
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(feed));
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + startPlus100Min);
-
-        AssertUtil.assertSucceeded(prism.getFeedHelper().schedule(feed));
-
-        // both replication instances
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + startPlus100Min);
-
-        // single instance at -30
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus20Min);
-
-        //single at -10
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus40Min);
-
-        //single at 10
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus40Min);
-
-        //single at 30
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus40Min);
-
-        String postFix = "/US/" + cluster2.getClusterHelper().getColoName();
-        String prefix = bundles[0].getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), cluster2FS);
-        HadoopUtil.lateDataReplenish(cluster2FS, 80, 20, prefix, postFix);
-
-        postFix = "/UK/" + cluster3.getClusterHelper().getColoName();
-        prefix = bundles[0].getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), cluster3FS);
-        HadoopUtil.lateDataReplenish(cluster3FS, 80, 20, prefix, postFix);
-
-        // both replication instances
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + startPlus100Min);
-
-        // single instance at -30
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus20Min);
-
-        //single at -10
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus40Min);
-
-        //single at 10
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus40Min);
-
-        //single at 30
-        prism.getFeedHelper().getProcessInstanceStatus(feedName, "?start=" + startPlus40Min);
-
-        LOGGER.info("Wait till feed goes into running ");
-
-        //suspend instances -10
-        prism.getFeedHelper().getProcessInstanceSuspend(feedName, "?start=" + startPlus40Min);
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startPlus20Min + "&end=" + startPlus40Min);
-
-        //resuspend -10 and suspend -30 source specific
-        prism.getFeedHelper().getProcessInstanceSuspend(feedName,
-            "?start=" + startPlus20Min + "&end=" + startPlus40Min);
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startPlus20Min + "&end=" + startPlus40Min);
-
-        //resume -10 and -30
-        prism.getFeedHelper().getProcessInstanceResume(feedName,
-            "?start=" + startPlus20Min + "&end=" + startPlus40Min);
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startPlus20Min + "&end=" + startPlus40Min);
-
-        //get running instances
-        prism.getFeedHelper().getRunningInstance(feedName);
-
-        //rerun succeeded instance
-        prism.getFeedHelper().getProcessInstanceRerun(feedName, "?start=" + startTime);
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + startPlus20Min);
-
-        //kill instance
-        prism.getFeedHelper().getProcessInstanceKill(feedName,
-            "?start=" + TimeUtil.addMinsToTime(startTime, 44));
-        prism.getFeedHelper().getProcessInstanceKill(feedName, "?start=" + startTime);
-
-        //end time should be less than end of validity i.e startTime + 110
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + TimeUtil.addMinsToTime(startTime, 110));
-
-        //rerun killed instance
-        prism.getFeedHelper().getProcessInstanceRerun(feedName, "?start=" + startTime);
-        prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + TimeUtil.addMinsToTime(startTime, 110));
-
-        //kill feed
-        prism.getFeedHelper().delete(feed);
-        InstancesResult responseInstance = prism.getFeedHelper().getProcessInstanceStatus(feedName,
-            "?start=" + startTime + "&end=" + TimeUtil.addMinsToTime(startTime, 110));
-
-        LOGGER.info(responseInstance.getMessage());
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedLateRerunTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedLateRerunTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedLateRerunTest.java
deleted file mode 100644
index 5bb5e6e..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedLateRerunTest.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.List;
-
-/**
- * This test submits and schedules a feed and then checks for replication.
- * On adding further late data, it checks whether the data has been replicated correctly within the given late cut-off time.
- * It assumes that the late frequency set in the server is 3 minutes, although the value can be changed as required.
- */
-@Test(groups = "embedded")
-public class FeedLateRerunTest extends BaseTestClass {
-
-    private ColoHelper cluster1 = servers.get(0);
-    private ColoHelper cluster2 = servers.get(1);
-    private FileSystem cluster1FS = serverFS.get(0);
-    private FileSystem cluster2FS = serverFS.get(1);
-    private OozieClient cluster2OC = serverOC.get(1);
-    private String baseTestDir = cleanAndGetTestDir();
-    private String feedDataLocation = baseTestDir + "/source" + MINUTE_DATE_PATTERN;
-    private String targetPath = baseTestDir + "/target";
-    private String targetDataLocation = targetPath + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(FeedLateRerunTest.class);
-    private String source = null;
-    private String target = null;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws JAXBException, IOException {
-        Bundle bundle = BundleUtil.readFeedReplicationBundle();
-        bundles[0] = new Bundle(bundle, cluster1);
-        bundles[1] = new Bundle(bundle, cluster2);
-        bundles[0].generateUniqueBundle(this);
-        bundles[1].generateUniqueBundle(this);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @Test(dataProvider = "dataFlagProvider")
-    public void testLateRerun(boolean dataFlag)
-        throws URISyntaxException, AuthenticationException, InterruptedException, IOException,
-        OozieClientException, JAXBException {
-        Bundle.submitCluster(bundles[0], bundles[1]);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 30);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-
-        //configure feed
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        feed.setFilePath(feedDataLocation);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set cluster1 as source
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        //set cluster2 as target
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.TARGET)
-                .withDataLocation(targetDataLocation)
-                .build());
-        String entityName = feed.getName();
-
-        //submit and schedule feed
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-
-        //check if coordinator exists
-        InstanceUtil.waitTillInstancesAreCreated(cluster2OC, feed.toString(), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, entityName, "REPLICATION"), 1);
-
-        //Finding bundleId of replicated instance on target
-        String bundleId = OozieUtil.getLatestBundleID(cluster2OC, entityName, EntityType.FEED);
-
-        //Finding and creating missing dependencies
-        List<String> missingDependencies = getAndCreateDependencies(
-            cluster1FS, cluster1.getPrefix(), cluster2OC, bundleId, dataFlag, entityName);
-        int count = 1;
-        for (String location : missingDependencies) {
-            if (count==1) {
-                source = location;
-                count++;
-            }
-        }
-        source=splitPathFromIp(source, "8020");
-        LOGGER.info("source : " + source);
-        target = source.replace("source", "target");
-        LOGGER.info("target : " + target);
-        /* Sleep for some time (as defined in the server's runtime properties).
-           Let the instance rerun; it should then succeed. */
-        int sleepMins = 8;
-        for(int i=0; i < sleepMins; i++) {
-            LOGGER.info("Waiting...");
-            TimeUtil.sleepSeconds(60);
-        }
-        String bundleID = OozieUtil.getLatestBundleID(cluster2OC, entityName, EntityType.FEED);
-        OozieUtil.validateRetryAttempts(cluster2OC, bundleID, EntityType.FEED, 1);
-
-        //check if data has been replicated correctly
-        List<Path> cluster1ReplicatedData = HadoopUtil
-            .getAllFilesRecursivelyHDFS(cluster1FS, new Path(HadoopUtil.cutProtocol(source)));
-        List<Path> cluster2ReplicatedData = HadoopUtil
-            .getAllFilesRecursivelyHDFS(cluster2FS, new Path(HadoopUtil.cutProtocol(target)));
-        AssertUtil.checkForListSizes(cluster1ReplicatedData, cluster2ReplicatedData);
-    }
-
-    private String splitPathFromIp(String src, String port) {
-        String reqSrc, tempSrc = "";
-        if (src.contains(":")) {
-            String[] tempPath = src.split(":");
-            for (String aTempPath : tempPath) {
-                if (aTempPath.startsWith(port)) {
-                    tempSrc = aTempPath;
-                }
-            }
-        }
-        if (tempSrc.isEmpty()) {
-            reqSrc = src;
-        } else {
-            reqSrc=tempSrc.replace(port, "");
-        }
-        return reqSrc;
-    }
-
-    /* Waits until the coordinator on the target colo reports the missing dependencies of the
-       replication bundle, creates the corresponding folders on the source colo (seeding the
-       first one with data if dataFlag is set), waits for replication to succeed and finally
-       uploads late data to trigger a rerun. */
-    private List<String> getAndCreateDependencies(FileSystem sourceFS, String prefix, OozieClient targetOC,
-            String bundleId, boolean dataFlag, String entityName) throws OozieClientException, IOException {
-        List<String> missingDependencies = OozieUtil.getMissingDependencies(targetOC, bundleId);
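-        //poll for up to ~5 minutes (10 retries, 30 seconds apart) until the coordinator reports its missing dependencies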
-        for (int i = 0; i < 10 && missingDependencies == null; ++i) {
-            TimeUtil.sleepSeconds(30);
-            LOGGER.info("sleeping...");
-            missingDependencies = OozieUtil.getMissingDependencies(targetOC, bundleId);
-        }
-        Assert.assertNotNull(missingDependencies, "Missing dependencies not found.");
-        //print missing dependencies
-        for (String dependency : missingDependencies) {
-            LOGGER.info("dependency from job: " + dependency);
-        }
-        // Creating missing dependencies
-        HadoopUtil.createFolders(sourceFS, prefix, missingDependencies);
-        //Adding data to empty folders depending on dataFlag
-        if (dataFlag) {
-            int tempCount = 1;
-            for (String location : missingDependencies) {
-                if (tempCount==1) {
-                    LOGGER.info("Transferring data to : " + location);
-                    HadoopUtil.copyDataToFolder(sourceFS, location, OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-                    tempCount++;
-                }
-            }
-        }
-        //replication should start, wait while it ends
-        InstanceUtil.waitTillInstanceReachState(targetOC, entityName, 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-        // Adding data for late rerun
-        int tempCounter = 1;
-        for (String dependency : missingDependencies) {
-            if (tempCounter==1) {
-                LOGGER.info("Transferring late data to : " + dependency);
-                HadoopUtil.copyDataToFolder(sourceFS, dependency,
-                    OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.properties"));
-            }
-            tempCounter++;
-        }
-        return missingDependencies;
-    }
-
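-    /* Flag value denotes whether to add data for replication or not.
-     * flag=true : add data for replication.
-     * flag=false : let empty directories be replicated.
-     */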
-    @DataProvider(name = "dataFlagProvider")
-    private Object[][] dataFlagProvider() {
-        return new Object[][] {
-            new Object[] {true, },
-            new Object[] {false, },
-        };
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedReplicationTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedReplicationTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedReplicationTest.java
deleted file mode 100644
index a936aa1..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedReplicationTest.java
+++ /dev/null
@@ -1,581 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.supportClasses.ExecResult;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * feed replication test.
- * Replicates empty directories as well as directories containing data.
- */
-@Test(groups = "embedded")
-public class FeedReplicationTest extends BaseTestClass {
-
-    private ColoHelper cluster1 = servers.get(0);
-    private ColoHelper cluster2 = servers.get(1);
-    private ColoHelper cluster3 = servers.get(2);
-    private FileSystem cluster1FS = serverFS.get(0);
-    private FileSystem cluster2FS = serverFS.get(1);
-    private FileSystem cluster3FS = serverFS.get(2);
-    private OozieClient cluster2OC = serverOC.get(1);
-    private OozieClient cluster3OC = serverOC.get(2);
-    private String baseTestDir = cleanAndGetTestDir();
-    private String sourcePath = baseTestDir + "/source";
-    private String feedDataLocation = baseTestDir + "/source" + MINUTE_DATE_PATTERN;
-    private String targetPath = baseTestDir + "/target";
-    private String targetDataLocation = targetPath + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(FeedReplicationTest.class);
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws JAXBException, IOException {
-        Bundle bundle = BundleUtil.readFeedReplicationBundle();
-
-        bundles[0] = new Bundle(bundle, cluster1);
-        bundles[1] = new Bundle(bundle, cluster2);
-        bundles[2] = new Bundle(bundle, cluster3);
-
-        bundles[0].generateUniqueBundle(this);
-        bundles[1].generateUniqueBundle(this);
-        bundles[2].generateUniqueBundle(this);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        removeTestClassEntities();
-        cleanTestsDirs();
-    }
-
-    /**
-     * Test demonstrates replication of stored data from one source cluster to one target cluster.
-     * It checks the lifecycle of the replication workflow instance, including its creation. When
-     * replication ends, the test checks whether the data was replicated correctly.
-     * It also checks for the presence of the _SUCCESS file in the target directory.
-     */
-    @Test(dataProvider = "dataFlagProvider")
-    public void replicate1Source1Target(boolean dataFlag)
-        throws Exception {
-        Bundle.submitCluster(bundles[0], bundles[1]);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-
-        //configure feed
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        feed.setFilePath(feedDataLocation);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set cluster1 as source
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        //set cluster2 as target
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.TARGET)
-                .withDataLocation(targetDataLocation)
-                .build());
-        feed.withProperty("job.counter", "true");
-
-        //submit and schedule feed
-        LOGGER.info("Feed : " + Util.prettyPrintXml(feed.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-
-        //upload necessary data
-        DateTime date = new DateTime(startTime, DateTimeZone.UTC);
-        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy'/'MM'/'dd'/'HH'/'mm'");
-        String timePattern = fmt.print(date);
-        String sourceLocation = sourcePath + "/" + timePattern + "/";
-        String targetLocation = targetPath + "/" + timePattern + "/";
-        HadoopUtil.recreateDir(cluster1FS, sourceLocation);
-
-        Path toSource = new Path(sourceLocation);
-        Path toTarget = new Path(targetLocation);
-        if (dataFlag) {
-            HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation,
-                OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-            HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation,
-                OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile1.txt"));
-        }
-
-        //check if coordinator exists
-        InstanceUtil.waitTillInstancesAreCreated(cluster2OC, feed.toString(), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feed.getName(), "REPLICATION"), 1);
-
-        //replication should start, wait while it ends
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, Util.readEntityName(feed.toString()), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-
-        //check if data has been replicated correctly
-        List<Path> cluster1ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster1FS, toSource);
-        List<Path> cluster2ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster2FS, toTarget);
-
-        AssertUtil.checkForListSizes(cluster1ReplicatedData, cluster2ReplicatedData);
-
-        //_SUCCESS does not exist in source
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster1FS, toSource, ""), false);
-
-        //_SUCCESS should exist in target
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster2FS, toTarget, ""), true);
-
-        AssertUtil.assertLogMoverPath(true, Util.readEntityName(feed.toString()),
-            cluster2FS, "feed", "Success logs are not present");
-
-        ExecResult execResult = cluster1.getFeedHelper().getCLIMetrics(feed.getName());
-        AssertUtil.assertCLIMetrics(execResult, feed.getName(), 1, dataFlag);
-    }
-
-    /**
-     * Test demonstrates replication of stored data from one source cluster to two target clusters.
-     * It checks the lifecycle of the replication workflow instances, including their creation on
-     * both targets. When replication ends, the test checks whether the data was replicated
-     * correctly. It also checks for the presence of the _SUCCESS file in both target directories.
-     */
-    @Test(dataProvider = "dataFlagProvider")
-    public void replicate1Source2Targets(boolean dataFlag) throws Exception {
-        Bundle.submitCluster(bundles[0], bundles[1], bundles[2]);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-
-        //configure feed
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        feed.setFilePath(feedDataLocation);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set cluster1 as source
-        feed.addFeedCluster(
-                new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                        .withRetention("days(1000000)", ActionType.DELETE)
-                        .withValidity(startTime, endTime)
-                        .withClusterType(ClusterType.SOURCE)
-                        .build());
-        //set cluster2 as target
-        feed.addFeedCluster(
-                new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                        .withRetention("days(1000000)", ActionType.DELETE)
-                        .withValidity(startTime, endTime)
-                        .withClusterType(ClusterType.TARGET)
-                        .withDataLocation(targetDataLocation)
-                        .build());
-        //set cluster3 as target
-        feed.addFeedCluster(
-                new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[2].getClusters().get(0)))
-                        .withRetention("days(1000000)", ActionType.DELETE)
-                        .withValidity(startTime, endTime)
-                        .withClusterType(ClusterType.TARGET)
-                        .withDataLocation(targetDataLocation)
-                        .build());
-        feed.withProperty("job.counter", "true");
-
-        //submit and schedule feed
-        LOGGER.info("Feed : " + Util.prettyPrintXml(feed.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-
-        //upload necessary data
-        DateTime date = new DateTime(startTime, DateTimeZone.UTC);
-        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy'/'MM'/'dd'/'HH'/'mm'");
-        String timePattern = fmt.print(date);
-        String sourceLocation = sourcePath + "/" + timePattern + "/";
-        String targetLocation = targetPath + "/" + timePattern + "/";
-        HadoopUtil.recreateDir(cluster1FS, sourceLocation);
-
-        Path toSource = new Path(sourceLocation);
-        Path toTarget = new Path(targetLocation);
-
-        if (dataFlag) {
-            HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation,
-                OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-            HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation,
-                OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile1.txt"));
-        }
-
-        //check if all coordinators exist
-        InstanceUtil.waitTillInstancesAreCreated(cluster2OC, feed.toString(), 0);
-        InstanceUtil.waitTillInstancesAreCreated(cluster3OC, feed.toString(), 0);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feed.getName(), "REPLICATION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feed.getName(), "REPLICATION"), 1);
-        //replication on cluster 2 should start, wait till it ends
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-
-        //replication on cluster 3 should start, wait till it ends
-        InstanceUtil.waitTillInstanceReachState(cluster3OC, feed.getName(), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-
-        //check if data has been replicated correctly
-        List<Path> cluster1ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster1FS, toSource);
-        List<Path> cluster2ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster2FS, toTarget);
-        List<Path> cluster3ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster3FS, toTarget);
-
-        AssertUtil.checkForListSizes(cluster1ReplicatedData, cluster2ReplicatedData);
-        AssertUtil.checkForListSizes(cluster1ReplicatedData, cluster3ReplicatedData);
-
-        //_SUCCESS does not exist in source
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster1FS, toSource, ""), false);
-
-        //_SUCCESS should exist in target
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster2FS, toTarget, ""), true);
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster3FS, toTarget, ""), true);
-
-        AssertUtil.assertLogMoverPath(true, Util.readEntityName(feed.toString()),
-            cluster2FS, "feed", "Success logs are not present");
-
-        ExecResult execResult = cluster1.getFeedHelper().getCLIMetrics(feed.getName());
-        AssertUtil.assertCLIMetrics(execResult, feed.getName(), 1, dataFlag);
-    }
-
-    /**
-     * Test demonstrates how replication depends on the availability flag. The scenario includes
-     * one source and one target cluster. Even when the feed is submitted and scheduled and data
-     * is available, the feed still waits for the availability flag (a file whose name is defined
-     * as the availability flag in the feed definition). As soon as that file is uploaded to the
-     * data directory, replication starts, and when it ends the test checks whether the data was
-     * replicated correctly. It also checks for the presence of the availability flag in the
-     * target directory.
-     */
-    @Test(dataProvider = "dataFlagProvider")
-    public void availabilityFlagTest(boolean dataFlag) throws Exception {
-        //replicate1Source1Target scenario + set availability flag but don't upload required file
-        Bundle.submitCluster(bundles[0], bundles[1]);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-
-        //configure feed
-        String availabilityFlagName = "availabilityFlag.txt";
-        String feedName = Util.readEntityName(bundles[0].getDataSets().get(0));
-        FeedMerlin feedElement = bundles[0].getFeedElement(feedName);
-        feedElement.setAvailabilityFlag(availabilityFlagName);
-        bundles[0].writeFeedElement(feedElement, feedName);
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        feed.setFilePath(feedDataLocation);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set cluster1 as source
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        //set cluster2 as target
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.TARGET)
-                .withDataLocation(targetDataLocation)
-                .build());
-        feed.withProperty("job.counter", "true");
-
-        //submit and schedule feed
-        LOGGER.info("Feed : " + Util.prettyPrintXml(feed.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-
-        //upload necessary data
-        DateTime date = new DateTime(startTime, DateTimeZone.UTC);
-        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy'/'MM'/'dd'/'HH'/'mm'");
-        String timePattern = fmt.print(date);
-        String sourceLocation = sourcePath + "/" + timePattern + "/";
-        String targetLocation = targetPath + "/" + timePattern + "/";
-        HadoopUtil.recreateDir(cluster1FS, sourceLocation);
-
-        Path toSource = new Path(sourceLocation);
-        Path toTarget = new Path(targetLocation);
-        if (dataFlag) {
-            HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation,
-                OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-            HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation,
-                OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile1.txt"));
-        }
-
-        //wait till the replication instance gets created
-        InstanceUtil.waitTillInstancesAreCreated(cluster2OC, feed.toString(), 0);
-
-        //check if coordinator exists
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 1);
-
-        //replication should not start even after some waiting time
-        TimeUtil.sleepSeconds(60);
-        InstancesResult r = prism.getFeedHelper().getProcessInstanceStatus(feedName,
-                "?start=" + startTime + "&end=" + endTime);
-        InstanceUtil.validateResponse(r, 1, 0, 0, 1, 0);
-        LOGGER.info("Replication didn't start.");
-
-        //create availability flag on source
-        HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation, OSUtil.concat(OSUtil.RESOURCES, availabilityFlagName));
-
-        //check that the instance becomes running
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-            CoordinatorAction.Status.RUNNING, EntityType.FEED);
-
-        //wait till the instance succeeds
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-
-        //check if data was replicated correctly
-        List<Path> cluster1ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster1FS, toSource);
-        LOGGER.info("Data on source cluster: " + cluster1ReplicatedData);
-        List<Path> cluster2ReplicatedData = HadoopUtil
-                .getAllFilesRecursivelyHDFS(cluster2FS, toTarget);
-        LOGGER.info("Data on target cluster: " + cluster2ReplicatedData);
-        AssertUtil.checkForListSizes(cluster1ReplicatedData, cluster2ReplicatedData);
-
-        //availabilityFlag exists in source
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster1FS, toSource, availabilityFlagName), true);
-
-        //availabilityFlag should exist in target
-        Assert.assertEquals(HadoopUtil.getSuccessFolder(cluster2FS, toTarget, availabilityFlagName), true);
-
-        AssertUtil.assertLogMoverPath(true, Util.readEntityName(feed.toString()),
-            cluster2FS, "feed", "Success logs are not present");
-
-        ExecResult execResult = cluster1.getFeedHelper().getCLIMetrics(feed.getName());
-        AssertUtil.assertCLIMetrics(execResult, feed.getName(), 1, dataFlag);
-    }
-
-    /**
-     * Test for https://issues.apache.org/jira/browse/FALCON-668.
-     * Check that new DistCp options are allowed.
-     */
-    @Test
-    public void testNewDistCpOptions() throws Exception {
-        Bundle.submitCluster(bundles[0], bundles[1]);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        //configure feed
-        String feedName = Util.readEntityName(bundles[0].getDataSets().get(0));
-        FeedMerlin feedElement = bundles[0].getFeedElement(feedName);
-        bundles[0].writeFeedElement(feedElement, feedName);
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        feed.setFilePath(feedDataLocation);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set cluster1 as source
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        //set cluster2 as target
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.TARGET)
-                .withDataLocation(targetDataLocation)
-                .build());
-        feed.withProperty("job.counter", "true");
-
-        //add custom properties to feed
-        HashMap<String, String> propMap = new HashMap<>();
-        propMap.put("overwrite", "true");
-        propMap.put("ignoreErrors", "false");
-        propMap.put("skipChecksum", "false");
-        propMap.put("removeDeletedFiles", "true");
-        propMap.put("preserveBlockSize", "true");
-        propMap.put("preserveReplicationNumber", "true");
-        propMap.put("preservePermission", "true");
-        for (Map.Entry<String, String> entry : propMap.entrySet()) {
-            feed.withProperty(entry.getKey(), entry.getValue());
-        }
-        //add custom property which shouldn't be passed to workflow
-        HashMap<String, String> unsupportedPropMap = new HashMap<>();
-        unsupportedPropMap.put("myCustomProperty", "true");
-        feed.withProperty("myCustomProperty", "true");
-
-        //upload necessary data to source
-        DateTime date = new DateTime(startTime, DateTimeZone.UTC);
-        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy'/'MM'/'dd'/'HH'/'mm'");
-        String timePattern = fmt.print(date);
-        String sourceLocation = sourcePath + "/" + timePattern + "/";
-        HadoopUtil.recreateDir(cluster1FS, sourceLocation);
-        HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation, OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-        HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation, OSUtil.concat(OSUtil.NORMAL_INPUT,  "dataFile1.txt"));
-
-        //copy a file to target to check that it gets deleted because of the removeDeletedFiles property
-        String targetLocation = targetPath + "/" + timePattern + "/";
-        cluster2FS.copyFromLocalFile(new Path(OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile3.txt")),
-            new Path(targetLocation + "dataFile3.txt"));
-
-        //submit and schedule feed
-        LOGGER.info("Feed : " + Util.prettyPrintXml(feed.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-
-        //wait till the replication instance gets created
-        InstanceUtil.waitTillInstancesAreCreated(cluster2OC, feed.toString(), 0);
-
-        //check if coordinator exists and replication starts
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feed.getName(), "REPLICATION"), 1);
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-            CoordinatorAction.Status.RUNNING, EntityType.FEED);
-
-        //check that properties were passed to workflow definition
-        String bundleId = OozieUtil.getLatestBundleID(cluster2OC, feedName, EntityType.FEED);
-        String coordId = OozieUtil.getReplicationCoordID(bundleId, cluster2.getFeedHelper()).get(0);
-        CoordinatorAction coordinatorAction = cluster2OC.getCoordJobInfo(coordId).getActions().get(0);
-        String wfDefinition = cluster2OC.getJobDefinition(coordinatorAction.getExternalId());
-        LOGGER.info(String.format("Definition of coordinator job action %s : \n %s \n",
-            coordinatorAction.getExternalId(), Util.prettyPrintXml(wfDefinition)));
-        Assert.assertTrue(OozieUtil.propsArePresentInWorkflow(wfDefinition, "replication", propMap),
-            "New distCp supported properties should be passed to replication args list.");
-        Assert.assertFalse(OozieUtil.propsArePresentInWorkflow(wfDefinition, "replication", unsupportedPropMap),
-            "Unsupported properties shouldn't be passed to replication args list.");
-
-        //check that replication succeeds
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-
-        List<Path> finalFiles = HadoopUtil.getAllFilesRecursivelyHDFS(cluster2FS, new Path(targetPath));
-        Assert.assertEquals(finalFiles.size(), 2, "Only replicated files should be present on target "
-            + "because of 'removeDeletedFiles' distCp property.");
-
-        ExecResult execResult = cluster1.getFeedHelper().getCLIMetrics(feed.getName());
-        AssertUtil.assertCLIMetrics(execResult, feed.getName(), 1, true);
-    }
-
-    /**
-     * Test demonstrates failure of replication of stored data from one source cluster to one target cluster.
-     * When the replication job fails, the test checks whether failed logs are present in the staging directory.
-     */
-    @Test
-    public void replicate1Source1TargetFail()
-        throws Exception {
-        Bundle.submitCluster(bundles[0], bundles[1]);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-
-        //configure feed
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        feed.setFilePath(feedDataLocation);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set cluster1 as source
-        feed.addFeedCluster(
-                new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                        .withRetention("days(1000000)", ActionType.DELETE)
-                        .withValidity(startTime, endTime)
-                        .withClusterType(ClusterType.SOURCE)
-                        .build());
-        //set cluster2 as target
-        feed.addFeedCluster(
-                new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                        .withRetention("days(1000000)", ActionType.DELETE)
-                        .withValidity(startTime, endTime)
-                        .withClusterType(ClusterType.TARGET)
-                        .withDataLocation(targetDataLocation)
-                        .build());
-
-        //submit and schedule feed
-        LOGGER.info("Feed : " + Util.prettyPrintXml(feed.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-
-        //upload necessary data
-        DateTime date = new DateTime(startTime, DateTimeZone.UTC);
-        DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy'/'MM'/'dd'/'HH'/'mm'");
-        String timePattern = fmt.print(date);
-        String sourceLocation = sourcePath + "/" + timePattern + "/";
-        String targetLocation = targetPath + "/" + timePattern + "/";
-        HadoopUtil.recreateDir(cluster1FS, sourceLocation);
-
-        Path toSource = new Path(sourceLocation);
-        Path toTarget = new Path(targetLocation);
-        HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation, OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-        HadoopUtil.copyDataToFolder(cluster1FS, sourceLocation, OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile1.txt"));
-
-        //check if coordinator exists
-        InstanceUtil.waitTillInstancesAreCreated(cluster2OC, feed.toString(), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feed.getName(), "REPLICATION"), 1);
-
-        //check that the instance becomes running
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-                CoordinatorAction.Status.RUNNING, EntityType.FEED);
-
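-        //remove the source data while the instance is running so that the replication instance fails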
-        HadoopUtil.deleteDirIfExists(sourceLocation, cluster1FS);
-
-        //check that the instance gets killed
-        InstanceUtil.waitTillInstanceReachState(cluster2OC, feed.getName(), 1,
-                CoordinatorAction.Status.KILLED, EntityType.FEED);
-
-        AssertUtil.assertLogMoverPath(false, Util.readEntityName(feed.toString()),
-                cluster2FS, "feed", "Success logs are not present");
-    }
-
-    /* Flag value denotes whether to add data for replication or not.
-     * flag=true : add data for replication.
-     * flag=false : let empty directories be replicated.
-     */
-    @DataProvider(name = "dataFlagProvider")
-    private Object[][] dataFlagProvider() {
-        return new Object[][] {
-            new Object[] {true, },
-            new Object[] {false, },
-        };
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedResumeTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedResumeTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedResumeTest.java
deleted file mode 100644
index ec117d7..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedResumeTest.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed resume tests.
- */
-@Test(groups = "embedded")
-public class FeedResumeTest extends BaseTestClass {
-
-    private final AbstractEntityHelper feedHelper = prism.getFeedHelper();
-    private String feed;
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0].generateUniqueBundle(this);
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].submitClusters(prism);
-        feed = bundles[0].getInputFeedFromBundle();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Launches a feed, suspends it, then resumes it and checks that it gets back to the running state.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void resumeSuspendedFeed() throws Exception {
-        AssertUtil.assertSucceeded(feedHelper.submitAndSchedule(feed));
-        AssertUtil.assertSucceeded(feedHelper.suspend(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-        AssertUtil.assertSucceeded(feedHelper.resume(feed));
-        ServiceResponse response = feedHelper.getStatus(feed);
-        String colo = feedHelper.getColo();
-        Assert.assertTrue(response.getMessage().contains(colo + "/RUNNING"));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-    }
-
-
-    /**
-     * Tries to resume a feed that wasn't submitted and scheduled. The attempt should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void resumeNonExistentFeed() throws Exception {
-        AssertUtil.assertFailed(feedHelper.resume(feed));
-    }
-
-    /**
-     * Tries to resume a deleted feed. The attempt should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void resumeDeletedFeed() throws Exception {
-        AssertUtil.assertSucceeded(feedHelper.submitAndSchedule(feed));
-        AssertUtil.assertSucceeded(feedHelper.delete(feed));
-        AssertUtil.assertFailed(feedHelper.resume(feed));
-    }
-
-    /**
-     * Tries to resume a scheduled feed which wasn't suspended. The feed status shouldn't change.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void resumeScheduledFeed() throws Exception {
-        AssertUtil.assertSucceeded(feedHelper.submitAndSchedule(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-        AssertUtil.assertSucceeded(feedHelper.resume(feed));
-        ServiceResponse response = feedHelper.getStatus(feed);
-        String colo = feedHelper.getColo();
-        Assert.assertTrue(response.getMessage().contains(colo + "/RUNNING"));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSLATest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSLATest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSLATest.java
deleted file mode 100644
index 28ddbd7..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSLATest.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed SLA tests.
- */
-@Test(groups = "embedded")
-public class FeedSLATest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private String baseTestDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestDir + "/input" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(FeedSLATest.class);
-
-    private FeedMerlin feedMerlin;
-    private String startTime;
-    private String endTime;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        Bundle bundle = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundle, cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-
-        startTime = TimeUtil.getTimeWrtSystemTime(0);
-        endTime = TimeUtil.addMinsToTime(startTime, 120);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        ServiceResponse response =
-                prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0));
-        AssertUtil.assertSucceeded(response);
-
-        feedMerlin = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        feedMerlin.setFrequency(new Frequency("1", Frequency.TimeUnit.hours));
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Submit feed with correctly adjusted sla. Response should reflect success.
-     *
-     */
-
-    @Test
-    public void submitValidFeedSLA() throws Exception {
-
-        feedMerlin.clearFeedClusters();
-        feedMerlin.addFeedCluster(new FeedMerlin.FeedClusterBuilder(
-                Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .build());
-
-        //set slaLow and slaHigh
-        feedMerlin.setSla(new Frequency("3", Frequency.TimeUnit.hours), new Frequency("6", Frequency.TimeUnit.hours));
-
-        final ServiceResponse serviceResponse =
-                prism.getFeedHelper().submitEntity(feedMerlin.toString());
-        AssertUtil.assertSucceeded(serviceResponse);
-    }
-
-    /**
-     * Submit feed with slaHigh greater than the feed retention period. Response should reflect failure.
-     *
-     */
-
-    @Test
-    public void submitFeedWithSLAHigherThanRetention() throws Exception {
-
-        feedMerlin.clearFeedClusters();
-        feedMerlin.addFeedCluster(new FeedMerlin.FeedClusterBuilder(
-                Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention((new Frequency("2", Frequency.TimeUnit.hours)).toString(), ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .build());
-
-        //set slaLow and slaHigh
-        feedMerlin.setSla(new Frequency("3", Frequency.TimeUnit.hours), new Frequency("6", Frequency.TimeUnit.hours));
-
-        final ServiceResponse serviceResponse =
-                prism.getFeedHelper().submitEntity(feedMerlin.toString());
-        String message = "Feed's retention limit: "
-                + feedMerlin.getClusters().getClusters().get(0).getRetention().getLimit()
-                + " of referenced cluster " + bundles[0].getClusterNames().get(0)
-                + " should be more than feed's late arrival cut-off period: "
-                + feedMerlin.getSla().getSlaHigh().getTimeUnit()
-                + "(" + feedMerlin.getSla().getSlaHigh().getFrequency() + ")"
-                + " for feed: " + bundles[0].getInputFeedNameFromBundle();
-        validate(serviceResponse, message);
-    }
-
-
-    /**
-     * Submit feed with slaHigh less than slaLow. Response should reflect failure.
-     *
-     */
-    @Test
-    public void submitFeedWithSLAHighLowerthanSLALow() throws Exception {
-
-        feedMerlin.clearFeedClusters();
-        feedMerlin.addFeedCluster(new FeedMerlin.FeedClusterBuilder(
-                Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention((new Frequency("6", Frequency.TimeUnit.hours)).toString(), ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .build());
-
-        //set slaLow and slaHigh
-        feedMerlin.setSla(new Frequency("4", Frequency.TimeUnit.hours), new Frequency("2", Frequency.TimeUnit.hours));
-
-        final ServiceResponse serviceResponse =
-                prism.getFeedHelper().submitEntity(feedMerlin.toString());
-        String message = "slaLow of Feed: " + feedMerlin.getSla().getSlaLow().getTimeUnit() + "("
-                + feedMerlin.getSla().getSlaLow().getFrequency() + ")is greater than slaHigh: "
-                + feedMerlin.getSla().getSlaHigh().getTimeUnit() + "(" + feedMerlin.getSla().getSlaHigh().getFrequency()
-                + ") for cluster: " + bundles[0].getClusterNames().get(0);
-        validate(serviceResponse, message);
-    }
-
-    /**
-     * Submit feed with slaHigh and slaLow greater than feed retention. Response should reflect failure.
-     *
-     */
-    @Test
-    public void submitFeedWithSLAHighSLALowHigherThanRetention() throws Exception {
-
-        feedMerlin.clearFeedClusters();
-        feedMerlin.addFeedCluster(new FeedMerlin.FeedClusterBuilder(
-                Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention((new Frequency("4", Frequency.TimeUnit.hours)).toString(), ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .build());
-
-        //set slaLow and slaHigh
-        feedMerlin.setSla(new Frequency("5", Frequency.TimeUnit.hours), new Frequency("6", Frequency.TimeUnit.hours));
-
-        final ServiceResponse serviceResponse =
-                prism.getFeedHelper().submitEntity(feedMerlin.toString());
-        String message = "Feed's retention limit: "
-                + feedMerlin.getClusters().getClusters().get(0).getRetention().getLimit()
-                + " of referenced cluster " + bundles[0].getClusterNames().get(0)
-                + " should be more than feed's late arrival cut-off period: "
-                + feedMerlin.getSla().getSlaHigh().getTimeUnit() +"(" + feedMerlin.getSla().getSlaHigh().getFrequency()
-                + ")" + " for feed: " + bundles[0].getInputFeedNameFromBundle();
-        validate(serviceResponse, message);
-    }
-
-    /**
-     * Submit feed with slaHigh and slaLow having equal value. Response should reflect success.
-     *
-     */
-    @Test
-    public void submitFeedWithSameSLAHighSLALow() throws Exception {
-
-        feedMerlin.clearFeedClusters();
-        feedMerlin.addFeedCluster(new FeedMerlin.FeedClusterBuilder(
-                Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention((new Frequency("7", Frequency.TimeUnit.hours)).toString(), ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .build());
-
-        //set slaLow and slaHigh
-        feedMerlin.setSla(new Frequency("3", Frequency.TimeUnit.hours), new Frequency("3", Frequency.TimeUnit.hours));
-
-        final ServiceResponse serviceResponse =
-                prism.getFeedHelper().submitEntity(feedMerlin.toString());
-        AssertUtil.assertSucceeded(serviceResponse);
-    }
-
-    private void validate(ServiceResponse response, String message) throws Exception {
-        AssertUtil.assertFailed(response);
-        LOGGER.info("Expected message is : " + message);
-        Assert.assertTrue(response.getMessage().contains(message),
-                "Correct response was not present in feed schedule. Feed response is : "
-                        + response.getMessage());
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedScheduleTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedScheduleTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedScheduleTest.java
deleted file mode 100644
index 79b722a..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedScheduleTest.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed schedule tests.
- */
-@Test(groups = "embedded")
-public class FeedScheduleTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String feed;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        Bundle.submitCluster(bundles[0]);
-        feed = bundles[0].getInputFeedFromBundle();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Tries to schedule an already scheduled feed. The request should be considered correct.
-     * The feed status shouldn't change.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void scheduleAlreadyScheduledFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().schedule(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-
-        //now try re-scheduling again
-        response = prism.getFeedHelper().schedule(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-    }
-
-    /**
-     * Schedules a valid feed. The feed should reach the running state.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void scheduleValidFeed() throws Exception {
-        //submit feed
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        //now schedule the thing
-        response = prism.getFeedHelper().schedule(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-    }
-
-    /**
-     * Tries to schedule an already scheduled and suspended feed. The suspended status shouldn't change.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void scheduleSuspendedFeed() throws Exception {
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-
-        //now suspend
-        AssertUtil.assertSucceeded(prism.getFeedHelper().suspend(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-        //now schedule this!
-        AssertUtil.assertSucceeded(prism.getFeedHelper().schedule(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-    }
-
-    /**
-     * Schedules and deletes a feed, then tries to schedule it again. The request should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void scheduleKilledFeed() throws Exception {
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-
-        //now delete
-        AssertUtil.assertSucceeded(prism.getFeedHelper().delete(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.KILLED);
-        //now schedule this!
-        AssertUtil.assertFailed(prism.getFeedHelper().schedule(feed));
-    }
-
-    /**
-     * Tries to schedule a feed which wasn't submitted. The request should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void scheduleNonExistentFeed() throws Exception {
-        AssertUtil.assertFailed(prism.getFeedHelper().schedule(feed));
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedStatusTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedStatusTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedStatusTest.java
deleted file mode 100644
index d5e8696..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedStatusTest.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed status tests. Checks getStatus functionality.
- */
-@Test(groups = "embedded")
-public class FeedStatusTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String feed;
-    private static final Logger LOGGER = Logger.getLogger(FeedStatusTest.class);
-
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0].generateUniqueBundle(this);
-        bundles[0] = new Bundle(bundles[0], cluster);
-
-        //submit the cluster
-        ServiceResponse response =
-            prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0));
-        AssertUtil.assertSucceeded(response);
-        feed = bundles[0].getInputFeedFromBundle();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedules feed. Queries the feed status and checks the response
-     * correctness and that the feed status matches the expected one.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getStatusForScheduledFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feed));
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().getStatus(feed);
-
-        AssertUtil.assertSucceeded(response);
-
-        String colo = prism.getFeedHelper().getColo();
-        Assert.assertTrue(response.getMessage().contains(colo + "/RUNNING"));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-    }
-
-    /**
-     * Schedules and suspends feed. Queries the feed status and checks the response
-     * correctness and that the feed status matches the expected one.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getStatusForSuspendedFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().suspend(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().getStatus(feed);
-
-        AssertUtil.assertSucceeded(response);
-        String colo = prism.getFeedHelper().getColo();
-        Assert.assertTrue(response.getMessage().contains(colo + "/SUSPENDED"));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-    }
-
-    /**
-     * Submits feed. Queries the feed status and checks the response
-     * correctness and that the feed status matches the expected one.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getStatusForSubmittedFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().getStatus(feed);
-
-        AssertUtil.assertSucceeded(response);
-        String colo = prism.getFeedHelper().getColo();
-        Assert.assertTrue(response.getMessage().contains(colo + "/SUBMITTED"));
-        AssertUtil.checkNotStatus(clusterOC, EntityType.FEED, feed, Job.Status.RUNNING);
-    }
-
-    /**
-     * Submits and removes feed. Queries the feed status and checks the response correctness.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getStatusForDeletedFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().delete(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().getStatus(feed);
-        AssertUtil.assertFailed(response);
-
-        Assert.assertTrue(
-            response.getMessage().contains(Util.readEntityName(feed) + " (FEED) not found"));
-        AssertUtil.checkNotStatus(clusterOC, EntityType.FEED, feed, Job.Status.KILLED);
-    }
-
-    /**
-     * Queries the status of a feed which wasn't submitted and checks the response.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void getStatusForNonExistentFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().getStatus(feed);
-        AssertUtil.assertFailed(response);
-        Assert.assertTrue(
-            response.getMessage().contains(Util.readEntityName(feed) + " (FEED) not found"));
-
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitAndScheduleTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitAndScheduleTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitAndScheduleTest.java
deleted file mode 100644
index f7bf0f8..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitAndScheduleTest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-/**
- * Feed submit and schedule tests.
- */
-@Test(groups = "embedded")
-public class FeedSubmitAndScheduleTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String feed;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        feed = bundles[0].getDataSets().get(0);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        //remove entities which belong to both default and different user
-        removeTestClassEntities(null, MerlinConstants.DIFFERENT_USER_NAME);
-    }
-
-    @Test(groups = {"singleCluster"})
-    public void snsNewFeed() throws Exception {
-        submitFirstClusterScheduleFirstFeed();
-    }
-
-    /**
-     * Submits the cluster and then submits and schedules the feed that depends on it.
-     *
-     * @throws JAXBException
-     * @throws IOException
-     * @throws URISyntaxException
-     * @throws AuthenticationException
-     */
-    private void submitFirstClusterScheduleFirstFeed()
-        throws JAXBException, IOException, URISyntaxException, AuthenticationException,
-        InterruptedException {
-        AssertUtil.assertSucceeded(prism.getClusterHelper()
-            .submitEntity(bundles[0].getClusters().get(0)));
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Submits and schedules a feed and then tries to do the same again. Checks that the status
-     * hasn't changed and the response is successful.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void snsExistingFeed() throws Exception {
-        submitFirstClusterScheduleFirstFeed();
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.RUNNING);
-
-        //get created bundle id
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, Util.readEntityName(feed), EntityType.FEED);
-
-        //try to submit and schedule the same feed again
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.RUNNING);
-
-        //check that new bundle wasn't created
-        OozieUtil.verifyNewBundleCreation(clusterOC, bundleId, null, feed, false, false);
-    }
-
-    /**
-     * Tries to submit and schedule a feed without submitting the cluster it depends on.
-     * Request should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void snsFeedWithoutCluster() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertFailed(response);
-    }
-
-    /**
-     * Submits and schedules feed. Removes it. Submits and schedules the removed feed again.
-     * Checks the response and the feed status.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void snsDeletedFeed() throws Exception {
-        submitFirstClusterScheduleFirstFeed();
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.RUNNING);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().delete(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.KILLED);
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.RUNNING);
-    }
-
-    /**
-     * Submits and schedules feed, suspends it, then submits and schedules it again.
-     * Checks that the response is successful and the feed status hasn't changed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void snsSuspendedFeed() throws Exception {
-        submitFirstClusterScheduleFirstFeed();
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.RUNNING);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().suspend(feed));
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.SUSPENDED);
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, bundles[0], Job.Status.SUSPENDED);
-    }
-
-    /**
-     * Test for https://issues.apache.org/jira/browse/FALCON-1647.
-     * Create cluster entity as user1. Submit and schedule feed entity feed1 in this cluster as user1.
-     * Now try to submit and schedule a feed entity feed2 in this cluster as user2.
-     */
-    @Test
-    public void snsDiffFeedDiffUserSameCluster()
-        throws URISyntaxException, AuthenticationException, InterruptedException, IOException, JAXBException {
-        bundles[0].submitClusters(prism);
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed));
-        FeedMerlin feedMerlin = FeedMerlin.fromString(feed);
-        feedMerlin.setName(feedMerlin.getName() + "-2");
-        feedMerlin.setACL(MerlinConstants.DIFFERENT_USER_NAME, MerlinConstants.DIFFERENT_USER_GROUP, "*");
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(
-            feedMerlin.toString(), MerlinConstants.DIFFERENT_USER_NAME, null);
-        AssertUtil.assertSucceeded(response);
-    }
-}


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELValidationsTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELValidationsTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELValidationsTest.java
deleted file mode 100644
index a0922cb..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ELValidationsTest.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.TestNGException;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.text.DecimalFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Calendar;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.TimeZone;
-
-
-/**
- * EL Validations tests.
- */
-@Test(groups = "embedded")
-public class ELValidationsTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private static final Logger LOGGER = Logger.getLogger(ELValidationsTest.class);
-    private String aggregateWorkflowDir = cleanAndGetTestDir() + "/aggregator";
-
-
-    @Test(groups = {"0.1", "0.2"})
-    public void startInstBeforeFeedStartToday02() throws Exception {
-        String response =
-            testWith("2009-02-02T20:00Z", "2011-12-31T00:00Z", "2009-02-02T20:00Z",
-                "2011-12-31T00:00Z", "now(-40,0)", "currentYear(20,30,24,20)", false);
-        validate(response);
-    }
-
-    @Test(groups = {"singleCluster"})
-    public void startInstAfterFeedEnd() throws Exception {
-        String response = testWith(null, null, null, null,
-            "currentYear(10,0,22,0)", "now(4,20)", false);
-        validate(response);
-    }
-
-    @Test(groups = {"singleCluster"})
-    public void bothInstReverse() throws Exception {
-        String response = testWith(null, null, null, null,
-            "now(0,0)", "now(-100,0)", false);
-        validate(response);
-    }
-
-    @Test(groups = {"singleCluster"}, dataProvider = "EL-DP")
-    public void expressionLanguageTest(String startInstance, String endInstance) throws Exception {
-        testWith(null, null, null, null, startInstance, endInstance, true);
-    }
-
-    @DataProvider(name = "EL-DP")
-    public Object[][] getELData() {
-        return new Object[][]{
-            {"now(-3,0)", "now(4,20)"},
-            {"yesterday(22,0)", "now(4,20)"},
-            {"currentMonth(0,22,0)", "now(4,20)"},
-            {"lastMonth(30,22,0)", "now(4,20)"},
-            {"currentYear(0,0,22,0)", "currentYear(1,1,22,0)"},
-            {"currentMonth(0,22,0)", "currentMonth(1,22,20)"},
-            {"lastMonth(30,22,0)", "lastMonth(60,2,40)"},
-            {"lastYear(12,0,22,0)", "lastYear(13,1,22,0)"},
-        };
-    }
-
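-    /**
-     * Asserts that the response message reports an EL validation error: a start or end instance
-     * outside the feed validity period, or an end instance before the start instance.
-     */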
-    private void validate(String response) {
-        if ((response.contains("End instance ") || response.contains("Start instance"))
-            && (response.contains("for feed") || response.contains("of feed"))
-            && (response.contains("is before the start of feed")
-            || response.contains("is after the end of feed"))) {
-            return;
-        }
-        if (response.contains("End instance")
-            && response.contains("is before the start instance")) {
-            return;
-        }
-        Assert.fail("Response is not valid");
-    }
-
-    private String testWith(String feedStart,
-                            String feedEnd, String processStart,
-                            String processEnd,
-                            String startInstance, String endInstance, boolean isMatch)
-        throws IOException, JAXBException, ParseException, URISyntaxException {
-        HadoopUtil.uploadDir(cluster.getClusterHelper().getHadoopFS(),
-            aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-        Bundle bundle = BundleUtil.readELBundle();
-        bundle = new Bundle(bundle, cluster.getPrefix());
-        bundle.generateUniqueBundle(this);
-        bundle.setProcessWorkflow(aggregateWorkflowDir);
-        if (feedStart != null && feedEnd != null) {
-            bundle.setFeedValidity(feedStart, feedEnd, bundle.getInputFeedNameFromBundle());
-        }
-        if (processStart != null && processEnd != null) {
-            bundle.setProcessValidity(processStart, processEnd);
-        }
-        try {
-            bundle.setInvalidData();
-            bundle.setDatasetInstances(startInstance, endInstance);
-            String submitResponse = bundle.submitFeedsScheduleProcess(prism).getMessage();
-            LOGGER.info("processData in try is: " + Util.prettyPrintXml(bundle.getProcessData()));
-            TimeUtil.sleepSeconds(45);
-            if (isMatch) {
-                getAndMatchDependencies(serverOC.get(0), bundle);
-            }
-            return submitResponse;
-        } catch (Exception e) {
-            e.printStackTrace();
-            throw new TestNGException(e);
-        } finally {
-            LOGGER.info("deleting entity:");
-            bundle.deleteBundle(prism);
-        }
-    }
-
-    private void getAndMatchDependencies(OozieClient oozieClient, Bundle bundle) {
-        try {
-            List<String> bundles = null;
-            for (int i = 0; i < 10; ++i) {
-                bundles = OozieUtil.getBundles(oozieClient, bundle.getProcessName(), EntityType.PROCESS);
-                if (bundles.size() > 0) {
-                    break;
-                }
-                TimeUtil.sleepSeconds(30);
-            }
-            Assert.assertTrue(bundles != null && bundles.size() > 0, "Bundle job not created.");
-            String coordID = bundles.get(0);
-            LOGGER.info("coord id: " + coordID);
-            List<String> missingDependencies = OozieUtil.getMissingDependencies(oozieClient, coordID);
-            for (int i = 0; i < 10 && missingDependencies == null; ++i) {
-                TimeUtil.sleepSeconds(30);
-                missingDependencies = OozieUtil.getMissingDependencies(oozieClient, coordID);
-            }
-            Assert.assertNotNull(missingDependencies, "Missing dependencies not found.");
-            for (String dependency : missingDependencies) {
-                LOGGER.info("dependency from job: " + dependency);
-            }
-            Date jobNominalTime = OozieUtil.getNominalTime(oozieClient, coordID);
-            Calendar time = Calendar.getInstance();
-            time.setTime(jobNominalTime);
-            LOGGER.info("nominalTime:" + jobNominalTime);
-            SimpleDateFormat df = new SimpleDateFormat("dd MMM yyyy HH:mm:ss");
-            LOGGER.info(
-                "nominalTime in GMT string: " + df.format(jobNominalTime.getTime()) + " GMT");
-            TimeZone z = time.getTimeZone();
-            int offset = z.getRawOffset();
-            int offsetHrs = offset / 1000 / 60 / 60;
-            int offsetMins = offset / 1000 / 60 % 60;
-
-            LOGGER.info("offset: " + offsetHrs);
-            LOGGER.info("offset: " + offsetMins);
-
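-            // shift the calendar back by the local timezone's raw offset to obtain the GMT time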
-            time.add(Calendar.HOUR_OF_DAY, (-offsetHrs));
-            time.add(Calendar.MINUTE, (-offsetMins));
-
-            LOGGER.info("GMT Time: " + time.getTime());
-
-            int frequency = bundle.getInitialDatasetFrequency();
-            List<String> qaDependencyList =
-                getQADependencyList(time, bundle.getStartInstanceProcess(time),
-                    bundle.getEndInstanceProcess(time),
-                    frequency, bundle);
-            for (String qaDependency : qaDependencyList) {
-                LOGGER.info("qa qaDependencyList: " + qaDependency);
-            }
-
-            Assert.assertTrue(matchDependencies(missingDependencies, qaDependencyList));
-        } catch (Exception e) {
-            e.printStackTrace();
-            throw new TestNGException(e);
-        }
-    }
-
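-    /**
-     * Returns true if both lists are of equal size and, after sorting both, each dependency
-     * reported by the job contains the corresponding expected path.
-     */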
-    private boolean matchDependencies(List<String> fromJob, List<String> qaList) {
-        if (fromJob.size() != qaList.size()) {
-            return false;
-        }
-        Collections.sort(fromJob);
-        Collections.sort(qaList);
-        for (int index = 0; index < fromJob.size(); index++) {
-            if (!fromJob.get(index).contains(qaList.get(index))) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    private List<String> getQADependencyList(Calendar nominalTime, Date startRef,
-                                            Date endRef, int frequency, Bundle bundle) {
-        LOGGER.info("start ref:" + startRef);
-        LOGGER.info("end ref:" + endRef);
-        Calendar initialTime = Calendar.getInstance();
-        initialTime.setTime(startRef);
-        Calendar finalTime = Calendar.getInstance();
-
-        finalTime.setTime(endRef);
-        String path = bundle.getDatasetPath();
-
-        TimeZone tz = TimeZone.getTimeZone("GMT");
-        nominalTime.setTimeZone(tz);
-        LOGGER.info("initialTime: " + initialTime.getTime());
-        LOGGER.info("finalTime: " + finalTime.getTime());
-        List<String> returnList = new ArrayList<>();
-        while (initialTime.getTime().before(finalTime.getTime())) {
-            LOGGER.info("initialTime: " + initialTime.getTime());
-            returnList.add(getPath(path, initialTime));
-            initialTime.add(Calendar.MINUTE, frequency);
-        }
-        returnList.add(getPath(path, initialTime));
-        Collections.reverse(returnList);
-        return returnList;
-    }
-
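-    /**
-     * Substitutes ${YEAR}, ${MONTH}, ${DAY}, ${HOUR} and ${MINUTE} placeholders in the path
-     * with values from the given calendar, padding all but the year to two digits.
-     */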
-    private String getPath(String path, Calendar time) {
-        if (path.contains("${YEAR}")) {
-            path = path.replaceAll("\\$\\{YEAR\\}", Integer.toString(time.get(Calendar.YEAR)));
-        }
-        if (path.contains("${MONTH}")) {
-            path = path.replaceAll("\\$\\{MONTH\\}", intToString(time.get(Calendar.MONTH) + 1, 2));
-        }
-        if (path.contains("${DAY}")) {
-            path = path.replaceAll("\\$\\{DAY\\}", intToString(time.get(Calendar.DAY_OF_MONTH), 2));
-        }
-        if (path.contains("${HOUR}")) {
-            path = path.replaceAll("\\$\\{HOUR\\}", intToString(time.get(Calendar.HOUR_OF_DAY), 2));
-        }
-        if (path.contains("${MINUTE}")) {
-            path = path.replaceAll("\\$\\{MINUTE\\}", intToString(time.get(Calendar.MINUTE), 2));
-        }
-        return path;
-    }
-
-    private String intToString(int num, int digits) {
-        assert digits > 0 : "Invalid number of digits";
-
-        // create variable length array of zeros
-        char[] zeros = new char[digits];
-        Arrays.fill(zeros, '0');
-
-        // format number as String
-        DecimalFormat df = new DecimalFormat(String.valueOf(zeros));
-        return df.format(num);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/EmbeddedPigScriptTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/EmbeddedPigScriptTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/EmbeddedPigScriptTest.java
deleted file mode 100644
index c49c381..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/EmbeddedPigScriptTest.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.entity.v0.process.EngineType;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesResult.WorkflowStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * Embedded pig script test.
- */
-@Test(groups = "embedded")
-public class EmbeddedPigScriptTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String pigTestDir = cleanAndGetTestDir();
-    private String pigScriptDir = pigTestDir + "/pig";
-    private String pigScriptLocation = pigScriptDir + "/id.pig";
-    private String inputPath = pigTestDir + "/input" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(EmbeddedPigScriptTest.class);
-    private static final double TIMEOUT = 15;
-    private String processName;
-    private String process;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-
-        //copy pig script
-        HadoopUtil.uploadDir(clusterFS, pigScriptDir, OSUtil.concat(OSUtil.RESOURCES, "pig"));
-        Bundle bundle = BundleUtil.readELBundle();
-        bundle.generateUniqueBundle(this);
-        bundle = new Bundle(bundle, cluster);
-        String startDate = "2010-01-02T00:40Z";
-        String endDate = "2010-01-02T01:10Z";
-        bundle.setInputFeedDataPath(inputPath);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 20);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT,
-            bundle.getFeedDataPathPrefix(), dataDates);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(inputPath);
-        bundles[0].setOutputFeedLocationData(pigTestDir + "/output-data" + MINUTE_DATE_PATTERN);
-        bundles[0].setProcessWorkflow(pigScriptLocation);
-        bundles[0].setProcessInputNames("INPUT");
-        bundles[0].setProcessOutputNames("OUTPUT");
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:10Z");
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-
-        final ProcessMerlin processElement = bundles[0].getProcessObject();
-        processElement.clearProperties().withProperty("queueName", "default");
-        processElement.getWorkflow().setEngine(EngineType.PIG);
-        bundles[0].setProcessData(processElement.toString());
-        bundles[0].submitFeedsScheduleProcess(prism);
-        process = bundles[0].getProcessData();
-        processName = Util.readEntityName(process);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 600000)
-    public void getResumedProcessInstance() throws Exception {
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-        prism.getProcessHelper().suspend(process);
-        TimeUtil.sleepSeconds(TIMEOUT);
-        ServiceResponse status = prism.getProcessHelper().getStatus(process);
-        Assert.assertTrue(status.getMessage().contains("SUSPENDED"), "Process not suspended.");
-        prism.getProcessHelper().resume(process);
-        TimeUtil.sleepSeconds(TIMEOUT);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 600000)
-    public void getSuspendedProcessInstance() throws Exception {
-        prism.getProcessHelper().suspend(process);
-        TimeUtil.sleepSeconds(TIMEOUT);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.SUSPENDED);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccessWOInstances(r);
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 600000)
-    public void getRunningProcessInstance() throws Exception {
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 600000)
-    public void getKilledProcessInstance() throws Exception {
-        prism.getProcessHelper().delete(process);
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    @Test(groups = {"singleCluster"}, timeOut = 6000000)
-    public void getSucceededProcessInstance() throws Exception {
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-        int counter = OSUtil.IS_WINDOWS ? 100 : 50;
-        OozieUtil.waitForBundleToReachState(clusterOC, bundles[0].getProcessName(), Job.Status.SUCCEEDED, counter);
-        r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccessWOInstances(r);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ExternalFSTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ExternalFSTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ExternalFSTest.java
deleted file mode 100644
index 728b797..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ExternalFSTest.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.MatrixUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Tests for operations with external file systems.
- */
-@Test(groups = "embedded")
-public class ExternalFSTest extends BaseTestClass {
-
-    public static final String WASB_END_POINT =
-            "wasb://" + MerlinConstants.WASB_CONTAINER + "@" + MerlinConstants.WASB_ACCOUNT;
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private FileSystem wasbFS;
-    private Bundle externalBundle;
-
-    private String baseTestDir = cleanAndGetTestDir();
-    private String sourcePath = baseTestDir + "/source";
-    private String baseWasbDir = "/falcon-regression/" + UUID.randomUUID().toString().split("-")[0];
-    private String testWasbTargetDir = baseWasbDir + '/'
-        + UUID.randomUUID().toString().split("-")[0] + '/';
-
-    private static final Logger LOGGER = Logger.getLogger(ExternalFSTest.class);
-
-    @BeforeClass
-    public void setUpClass() throws IOException {
-        HadoopUtil.recreateDir(clusterFS, baseTestDir);
-        Configuration conf = new Configuration();
-        conf.set("fs.defaultFS", WASB_END_POINT);
-        conf.set("fs.azure.account.key." + MerlinConstants.WASB_ACCOUNT,
-                MerlinConstants.WASB_SECRET);
-        conf.setBoolean("fs.hdfs.impl.disable.cache", false);
-        wasbFS = FileSystem.get(conf);
-        LOGGER.info("creating base wasb dir " + baseWasbDir);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws JAXBException, IOException {
-        Bundle bundle = BundleUtil.readFeedReplicationBundle();
-
-        bundles[0] = new Bundle(bundle, cluster);
-        externalBundle = new Bundle(bundle, cluster);
-
-        bundles[0].generateUniqueBundle(this);
-        externalBundle.generateUniqueBundle(this);
-
-        LOGGER.info("checking wasb credentials with location: " + testWasbTargetDir);
-        wasbFS.create(new Path(testWasbTargetDir));
-        wasbFS.delete(new Path(testWasbTargetDir), true);
-    }
-
-    @AfterMethod
-    public void tearDown() throws IOException {
-        removeTestClassEntities();
-        wasbFS.delete(new Path(testWasbTargetDir), true);
-    }
-
-    @AfterClass(alwaysRun = true)
-    public void tearDownClass() throws IOException {
-        wasbFS.delete(new Path(baseWasbDir), true);
-    }
-
-
-    @Test(dataProvider = "getInvalidTargets")
-    public void invalidCredentialsExtFS(String endpoint) throws Exception {
-        bundles[0].setClusterInterface(Interfacetype.READONLY, endpoint);
-        bundles[0].setClusterInterface(Interfacetype.WRITE, endpoint);
-
-        AssertUtil.assertFailed(prism.getClusterHelper()
-            .submitEntity(bundles[0].getClusterElement().toString()));
-
-    }
-
-    @Test(dataProvider = "getData")
-    public void replicateToExternalFS(final FileSystem externalFS,
-        final String separator, final boolean withData) throws Exception {
-        final String endpoint = externalFS.getUri().toString();
-        Bundle.submitCluster(bundles[0], externalBundle);
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 5);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        String datePattern = StringUtils.join(
-            new String[]{"${YEAR}", "${MONTH}", "${DAY}", "${HOUR}", "${MINUTE}"}, separator);
-
-        //configure feed
-        FeedMerlin feed = new FeedMerlin(bundles[0].getDataSets().get(0));
-        String targetDataLocation = endpoint + testWasbTargetDir + datePattern;
-        feed.setFilePath(sourcePath + '/' + datePattern);
-        //erase all clusters from feed definition
-        feed.clearFeedClusters();
-        //set local cluster as source
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        //set externalFS cluster as target
-        feed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(externalBundle.getClusters().get(0)))
-                .withRetention("days(1000000)", ActionType.DELETE)
-                .withValidity(startTime, endTime)
-                .withClusterType(ClusterType.TARGET)
-                .withDataLocation(targetDataLocation)
-                .build());
-
-        //submit and schedule feed
-        LOGGER.info("Feed : " + Util.prettyPrintXml(feed.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed.toString()));
-        datePattern = StringUtils.join(new String[]{"yyyy", "MM", "dd", "HH", "mm"}, separator);
-        //upload necessary data
-        DateTime date = new DateTime(startTime, DateTimeZone.UTC);
-        DateTimeFormatter fmt = DateTimeFormat.forPattern(datePattern);
-        String timePattern = fmt.print(date);
-        HadoopUtil.recreateDir(clusterFS, sourcePath + '/' + timePattern);
-        if (withData) {
-            HadoopUtil.copyDataToFolder(clusterFS, sourcePath + '/' + timePattern, OSUtil.SINGLE_FILE);
-        }
-
-        Path srcPath = new Path(sourcePath + '/' + timePattern);
-        Path dstPath = new Path(endpoint + testWasbTargetDir + '/' + timePattern);
-
-        //check if coordinator exists
-        TimeUtil.sleepSeconds(10);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, feed.toString(), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(clusterOC, feed.getName(), "REPLICATION"), 1);
-
-        //replication should start, wait while it ends
-        InstanceUtil.waitTillInstanceReachState(clusterOC, Util.readEntityName(feed.toString()), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.FEED);
-
-        //check if data has been replicated correctly
-        List<Path> cluster1ReplicatedData =
-                HadoopUtil.getAllFilesRecursivelyHDFS(clusterFS, srcPath);
-        List<Path> cluster2ReplicatedData =
-                HadoopUtil.getAllFilesRecursivelyHDFS(externalFS, dstPath);
-        AssertUtil.checkForListSizes(cluster1ReplicatedData, cluster2ReplicatedData);
-        final ContentSummary srcSummary = clusterFS.getContentSummary(srcPath);
-        final ContentSummary dstSummary = externalFS.getContentSummary(dstPath);
-        Assert.assertEquals(dstSummary.getLength(), srcSummary.getLength());
-    }
-
-
-
-    @DataProvider
-    public Object[][] getData() {
-        // "-" for a single directory, "/" for a dir with subdirs
-        return MatrixUtil.crossProduct(new FileSystem[]{wasbFS},
-            new String[]{"/", "-"},
-            new Boolean[]{true, false});
-    }
-
-    @DataProvider
-    public Object[][] getInvalidTargets() {
-        return new Object[][]{{"wasb://invalid@invalid.blob.core.windows.net/"}};
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedClusterUpdateTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedClusterUpdateTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedClusterUpdateTest.java
deleted file mode 100644
index feb0cc1..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedClusterUpdateTest.java
+++ /dev/null
@@ -1,678 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.core.util.XmlUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed cluster update tests.
- */
-@Test(groups = "distributed")
-public class FeedClusterUpdateTest extends BaseTestClass {
-
-    private String baseTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private ColoHelper cluster1 = servers.get(0);
-    private ColoHelper cluster2 = servers.get(1);
-    private ColoHelper cluster3 = servers.get(2);
-    private OozieClient cluster1OC = serverOC.get(0);
-    private OozieClient cluster2OC = serverOC.get(1);
-    private OozieClient cluster3OC = serverOC.get(2);
-    private FileSystem cluster2FS = serverFS.get(1);
-    private FileSystem cluster3FS = serverFS.get(2);
-    private String feed;
-    private String feedName;
-    private String startTime;
-    private String feedOriginalSubmit;
-    private String feedUpdated;
-    private String cluster1Name;
-    private String cluster2Name;
-    private String cluster3Name;
-    private static final Logger LOGGER = Logger.getLogger(FeedClusterUpdateTest.class);
-
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-        Bundle bundle = BundleUtil.readELBundle();
-        for (int i = 0; i < 3; i++) {
-            bundles[i] = new Bundle(bundle, servers.get(i));
-            bundles[i].generateUniqueBundle(this);
-            bundles[i].setProcessWorkflow(aggregateWorkflowDir);
-        }
-        try {
-            String postFix = "/US/" + servers.get(1).getClusterHelper().getColoName();
-            HadoopUtil.deleteDirIfExists(baseTestDir, cluster2FS);
-            HadoopUtil.lateDataReplenish(cluster2FS, 80, 1, baseTestDir, postFix);
-            postFix = "/UK/" + servers.get(2).getClusterHelper().getColoName();
-            HadoopUtil.deleteDirIfExists(baseTestDir, cluster3FS);
-            HadoopUtil.lateDataReplenish(cluster3FS, 80, 1, baseTestDir, postFix);
-        } finally {
-            removeTestClassEntities();
-        }
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        Bundle bundle = BundleUtil.readELBundle();
-        for (int i = 0; i < 3; i++) {
-            bundles[i] = new Bundle(bundle, servers.get(i));
-            bundles[i].generateUniqueBundle(this);
-            bundles[i].setProcessWorkflow(aggregateWorkflowDir);
-        }
-        BundleUtil.submitAllClusters(prism, bundles[0], bundles[1], bundles[2]);
-        feed = bundles[0].getDataSets().get(0);
-        feed = FeedMerlin.fromString(feed).clearFeedClusters().toString();
-        startTime = TimeUtil.getTimeWrtSystemTime(-50);
-        feedName = Util.readEntityName(feed);
-        cluster1Name = Util.readEntityName(bundles[0].getClusters().get(0));
-        cluster2Name = Util.readEntityName(bundles[1].getClusters().get(0));
-        cluster3Name = Util.readEntityName(bundles[2].getClusters().get(0));
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void addSourceCluster() throws Exception {
-        //add one source and one target, schedule only on source
-        feedOriginalSubmit = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule on source
-        response = cluster2.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 0);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        prism.getFeedHelper().submitAndSchedule(feedUpdated);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void addTargetCluster() throws Exception {
-        //add one source and one target, schedule only on source
-        feedOriginalSubmit = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule on source
-        response = cluster2.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 0);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("US/${cluster.colo}")
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Updated Feed: " + Util.prettyPrintXml(feedUpdated));
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        prism.getFeedHelper().submitAndSchedule(feedUpdated);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void add2SourceCluster() throws Exception {
-        //submit with one source cluster and schedule on it; update adds two more source clusters
-        feedOriginalSubmit = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule on source
-        response = cluster2.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 0);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("US/${cluster.colo}")
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Updated Feed: " + Util.prettyPrintXml(feedUpdated));
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        prism.getFeedHelper().submitAndSchedule(feedUpdated);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void add2TargetCluster() throws Exception {
-        //submit with one source cluster and schedule on it; update adds two target clusters
-        feedOriginalSubmit = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule on source
-        response = cluster2.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 0);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-
-        LOGGER.info("Updated Feed: " + Util.prettyPrintXml(feedUpdated));
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        prism.getFeedHelper().submitAndSchedule(feedUpdated);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void add1Source1TargetCluster() throws Exception {
-        //submit with one source cluster and schedule on it; update adds one source and one target cluster
-        feedOriginalSubmit = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule on source
-        response = cluster2.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 0);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("US/${cluster.colo}")
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Updated Feed: " + Util.prettyPrintXml(feedUpdated));
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        prism.getFeedHelper().submitAndSchedule(feedUpdated);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void deleteSourceCluster() throws Exception {
-        //submit with two source clusters and one target cluster; update removes one source cluster
-        feedOriginalSubmit = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("US/${cluster.colo}")
-                .build())
-            .toString();
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule through prism
-        response = prism.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .build())
-            .toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        response = cluster3.getFeedHelper().getEntityDefinition(feedUpdated);
-        AssertUtil.assertFailed(response);
-
-        prism.getFeedHelper().submitAndSchedule(feedUpdated);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 3);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 2);
-    }
-
-    @Test(enabled = true, groups = {"multiCluster"})
-    public void deleteTargetCluster() throws Exception {
-        /*
-        This test creates a multi-cluster feed. Cluster1 is the target cluster;
-        Cluster2 and Cluster3 are the source clusters.
-
-        The feed is submitted through prism, so it is submitted to both the target
-        and the source clusters. The feed is scheduled through prism, so Cluster2
-        and Cluster3 should each have only a retention coordinator. Cluster1, being
-        the target, should have both retention and replication coordinators: two
-        replication coordinators, one per source cluster.
-
-        The feed is then updated by removing cluster1 and cluster2 from the feed
-        xml and sending an update request.
-
-        Once the update completes, the definition should be gone from cluster1 and
-        cluster2, while prism and cluster3 should have the new definition.
-
-        There should be a new retention coordinator on cluster3 and the old number
-        of coordinators on cluster1 and cluster2.
-         */
-
-        //add two source clusters and one target cluster
-        feedOriginalSubmit = FeedMerlin.fromString(feed).clearFeedClusters().toString();
-
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit)
-            .addFeedCluster(new FeedMerlin.FeedClusterBuilder(cluster2Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(startTime, TimeUtil.addMinsToTime(startTime, 65))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("US/${cluster.colo}")
-                .build())
-            .toString();
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster1Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 20),
-                    TimeUtil.addMinsToTime(startTime, 85))
-                .withClusterType(ClusterType.TARGET)
-                .build())
-            .toString();
-        feedOriginalSubmit = FeedMerlin.fromString(feedOriginalSubmit).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedOriginalSubmit));
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(10);
-        AssertUtil.assertSucceeded(response);
-
-        //schedule through prism
-        response = prism.getFeedHelper().schedule(feedOriginalSubmit);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-
-        //prepare updated Feed
-        feedUpdated = FeedMerlin.fromString(feed).clearFeedClusters().toString();
-        feedUpdated = FeedMerlin.fromString(feedUpdated).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(cluster3Name)
-                .withRetention("hours(10)", ActionType.DELETE)
-                .withValidity(TimeUtil.addMinsToTime(startTime, 40),
-                    TimeUtil.addMinsToTime(startTime, 110))
-                .withClusterType(ClusterType.SOURCE)
-                .withPartition("UK/${cluster.colo}")
-                .build())
-            .toString();
-
-        LOGGER.info("Feed: " + Util.prettyPrintXml(feedUpdated));
-        response = prism.getFeedHelper().update(feedUpdated, feedUpdated);
-        TimeUtil.sleepSeconds(20);
-        AssertUtil.assertSucceeded(response);
-
-        //verify xml definitions
-        response = cluster1.getFeedHelper().getEntityDefinition(feedUpdated);
-        AssertUtil.assertFailed(response);
-        response = cluster2.getFeedHelper().getEntityDefinition(feedUpdated);
-        AssertUtil.assertFailed(response);
-        response = cluster3.getFeedHelper().getEntityDefinition(feedUpdated);
-        Assert.assertTrue(XmlUtil.isIdentical(feedUpdated, response.getMessage()));
-        response = prism.getFeedHelper().getEntityDefinition(feedUpdated);
-        Assert.assertTrue(XmlUtil.isIdentical(feedUpdated, response.getMessage()));
-
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster2OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "REPLICATION"), 0);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster3OC, feedName, "RETENTION"), 1);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "REPLICATION"), 2);
-        Assert.assertEquals(OozieUtil.checkIfFeedCoordExist(cluster1OC, feedName, "RETENTION"), 1);
-    }
-
-    /*
-    @Test(enabled = false)
-    public void delete2SourceCluster() {
-
-    }
-
-    @Test(enabled = false)
-    public void delete2TargetCluster() {
-
-    }
-
-    @Test(enabled = false)
-    public void delete1Source1TargetCluster() {
-
-    }
-    */
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceListingTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceListingTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceListingTest.java
deleted file mode 100644
index ecb5798..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedInstanceListingTest.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.FeedInstanceResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Test for https://issues.apache.org/jira/browse/FALCON-761.
- */
-@Test(groups = "embedded", timeOut = 900000)
-public class FeedInstanceListingTest extends BaseTestClass {
-    private String baseTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private String feedInputPath = baseTestDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private String processName;
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-
-    private static final Logger LOGGER = Logger.getLogger(FeedInstanceListingTest.class);
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setInputFeedPeriodicity(5, Frequency.TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, Frequency.TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessPeriodicity(5, Frequency.TimeUnit.minutes);
-        processName = bundles[0].getProcessName();
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException{
-        cleanTestsDirs();
-        removeTestClassEntities();
-    }
-
-    /**
-     * Test when all data is available for all instances.
-     */
-    @Test
-    public void testFeedListingWhenAllAvailable() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        List<List<String>> missingDependencies = OozieUtil.createMissingDependencies(cluster,
-                EntityType.PROCESS, processName, 0);
-        List<String> missingDependencyLastInstance = missingDependencies.get(missingDependencies.size()-1);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.SINGLE_FILE, missingDependencyLastInstance);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        FeedInstanceResult r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 0, 0, 0, 5);
-    }
-
-   /**
-    * Test when only empty directories exist for all instances.
-    */
-    @Test
-    public void testFeedListingWhenAllEmpty() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        FeedInstanceResult r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 0, 5, 0, 0);
-    }
-
-   /**
-    * Test when no data is present for any instance.
-    */
-    @Test
-    public void testFeedListingWhenAllMissing() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        FeedInstanceResult r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 5, 0, 0, 0);
-    }
-
-   /**
-    * Initially no availability flag is set for the feed and data is created, so the instance status is available.
-    * Then the availability flag is set and the feed is updated; the instance status should change to partial.
-    */
-    @Test
-    public void testFeedListingAfterFeedAvailabilityFlagUpdate() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        List<List<String>> missingDependencies = OozieUtil.createMissingDependencies(cluster,
-                EntityType.PROCESS, processName, 0);
-        List<String> missingDependencyLastInstance = missingDependencies.get(missingDependencies.size()-1);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.SINGLE_FILE, missingDependencyLastInstance);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        FeedInstanceResult r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 0, 0, 0, 5);
-        String inputFeed = bundles[0].getInputFeedFromBundle();
-        bundles[0].setInputFeedAvailabilityFlag("_SUCCESS");
-        ServiceResponse serviceResponse = prism.getFeedHelper().update(inputFeed, bundles[0].getInputFeedFromBundle());
-        AssertUtil.assertSucceeded(serviceResponse);
-        //Since we have not created availability flag on HDFS, the feed instance status should be partial
-        r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 0, 0, 5, 0);
-    }
-
-   /**
-    * Data is created for the feed, so the instance status is available.
-    * Then the data path is changed and the feed is updated; the instance status should change to missing.
-    */
-    @Test
-    public void testFeedListingAfterFeedDataPathUpdate() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        List<List<String>> missingDependencies = OozieUtil.createMissingDependencies(cluster,
-                EntityType.PROCESS, processName, 0);
-        List<String> missingDependencyLastInstance = missingDependencies.get(missingDependencies.size()-1);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.SINGLE_FILE, missingDependencyLastInstance);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        FeedInstanceResult r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 0, 0, 0, 5);
-        String inputFeed = bundles[0].getInputFeedFromBundle();
-        bundles[0].setInputFeedDataPath(baseTestDir + "/inputNew" + MINUTE_DATE_PATTERN);
-        ServiceResponse serviceResponse = prism.getFeedHelper().update(inputFeed, bundles[0].getInputFeedFromBundle());
-        AssertUtil.assertSucceeded(serviceResponse);
-        //Since we have not created directories for new path, the feed instance status should be missing
-        r = prism.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 5, 0, 0, 0);
-    }
-
-   /**
-    * Submit the feeds on prism and request the instance listing on the server. The request should succeed.
-    */
-    @Test
-    public void testFeedListingFeedSubmitOnPrismRequestOnServer() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:21Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        FeedInstanceResult r = cluster.getFeedHelper()
-                .getFeedInstanceListing(Util.readEntityName(bundles[0].getDataSets().get(0)),
-                        "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        validateResponse(r, 5, 5, 0, 0, 0);
-    }
-
-    /**
-     * Checks that the actual number of instances in each status equals the expected number
-     * of instances for that status.
-     *
-     * @param instancesResult response from the API which should contain information about
-     *                        feed instances <p/>
-     *                        All parameters below reflect the expected number of instances
-     *                        in a given status.
-     * @param totalCount      total number of instances.
-     * @param missingCount    number of missing instances.
-     * @param emptyCount      number of empty instances.
-     * @param partialCount    number of partial instances.
-     * @param availableCount  number of available instances.
-     */
-    private void validateResponse(FeedInstanceResult instancesResult, int totalCount,
-                                 int missingCount, int emptyCount, int partialCount, int availableCount) {
-        FeedInstanceResult.Instance[] instances = instancesResult.getInstances();
-        LOGGER.info("instances: " + Arrays.toString(instances));
-        Assert.assertNotNull(instances, "instances should be not null");
-        Assert.assertEquals(instances.length, totalCount, "Total Instances");
-        List<String> statuses = new ArrayList<>();
-        for (FeedInstanceResult.Instance instance : instances) {
-            Assert.assertNotNull(instance.getCluster());
-            Assert.assertNotNull(instance.getInstance());
-            Assert.assertNotNull(instance.getStatus());
-            Assert.assertNotNull(instance.getUri());
-            Assert.assertNotNull(instance.getCreationTime());
-            Assert.assertNotNull(instance.getSize());
-            final String status = instance.getStatus();
-            LOGGER.info("status: "+ status + ", instance: " + instance.getInstance());
-            statuses.add(status);
-        }
-
-        Assert.assertEquals(Collections.frequency(statuses, "MISSING"),
-                missingCount, "Missing Instances");
-        Assert.assertEquals(Collections.frequency(statuses, "EMPTY"),
-                emptyCount, "Empty Instances");
-        Assert.assertEquals(Collections.frequency(statuses, "PARTIAL"),
-                partialCount, "Partial Instances");
-        Assert.assertEquals(Collections.frequency(statuses, "AVAILABLE"),
-                availableCount, "Available Instances");
-    }
-}
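
The status counting in validateResponse above relies on java.util.Collections.frequency. The following is a minimal, self-contained sketch of that idiom; the class name and status values are illustrative and not taken from the commit:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class StatusCountSketch {
        public static void main(String[] args) {
            // Statuses as they might be collected from FeedInstanceResult.Instance objects.
            List<String> statuses = Arrays.asList("MISSING", "AVAILABLE", "MISSING", "EMPTY", "PARTIAL");
            // Collections.frequency counts occurrences of one value, mirroring the assertions above.
            System.out.println("MISSING   = " + Collections.frequency(statuses, "MISSING"));   // 2
            System.out.println("AVAILABLE = " + Collections.frequency(statuses, "AVAILABLE")); // 1
        }
    }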


[45/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/EntityList.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/EntityList.java b/client/src/main/java/org/apache/falcon/resource/EntityList.java
deleted file mode 100644
index b91bdbe..0000000
--- a/client/src/main/java/org/apache/falcon/resource/EntityList.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Process;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlElementWrapper;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Entity list used for marshalling / unmarshalling with REST calls.
- */
-@XmlRootElement(name = "entities")
-@XmlAccessorType(XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class EntityList {
-    public static final String INPUT_TAG = "Input";
-    public static final String OUTPUT_TAG = "Output";
-
-    public int getTotalResults() {
-        return totalResults;
-    }
-
-    @XmlElement
-    private int totalResults;
-
-    @XmlElement(name = "entity")
-    private final EntityElement[] elements;
-
-    /**
-     * List of fields returned by the REST API.
-     */
-    public static enum EntityFieldList {
-        TYPE, NAME, STATUS, TAGS, PIPELINES, CLUSTERS
-    }
-
-    /**
-     * Fields by which the REST API supports filtering.
-     */
-    public static enum EntityFilterByFields {
-        TYPE, NAME, STATUS, PIPELINES, CLUSTER, TAGS
-    }
-
-    /**
-     * Element within an entity.
-     */
-    public static class EntityElement {
-        //SUSPEND CHECKSTYLE CHECK VisibilityModifierCheck
-        @XmlElement
-        public String type;
-        @XmlElement
-        public String name;
-        @XmlElement
-        public String status;
-        @XmlElementWrapper(name = "tags")
-        public List<String> tag;
-        @XmlElementWrapper(name = "pipelines")
-        public List<String> pipeline;
-        @XmlElementWrapper(name = "clusters")
-        public List<String> cluster;
-
-        //RESUME CHECKSTYLE CHECK VisibilityModifierCheck
-
-        @Override
-        public String toString() {
-            String outString = "(" + type + ") " + name;
-            if (StringUtils.isNotEmpty(status)) {
-                outString += "(" + status + ")";
-            }
-
-            if (tag != null && !tag.isEmpty()) {
-                outString += " - " + tag.toString();
-            }
-
-            if (pipeline != null && !pipeline.isEmpty()) {
-                outString += " - " + pipeline.toString();
-            }
-
-            if (cluster != null && !cluster.isEmpty()) {
-                outString += " - " + cluster.toString();
-            }
-
-            outString += "\n";
-            return outString;
-        }
-    }
-
-    //For JAXB
-    public EntityList() {
-        this.elements = null;
-        this.totalResults = 0;
-    }
-
-    public EntityList(EntityElement[] elements, int totalResults) {
-        this.totalResults = totalResults;
-        this.elements = elements;
-    }
-
-    public EntityList(Entity[] elements, int totalResults) {
-        this.totalResults = totalResults;
-        int len = elements.length;
-        EntityElement[] items = new EntityElement[len];
-        for (int i = 0; i < len; i++) {
-            items[i] = createEntityElement(elements[i]);
-        }
-        this.elements = items;
-    }
-
-    private EntityElement createEntityElement(Entity e) {
-        EntityElement element = new EntityElement();
-        element.type = e.getEntityType().name().toLowerCase();
-        element.name = e.getName();
-        element.status = null;
-        element.tag = new ArrayList<String>();
-        element.pipeline = new ArrayList<String>();
-        element.cluster = new ArrayList<String>();
-        return element;
-    }
-
-    public EntityList(Entity[] dependentEntities, Entity entity) {
-        int len = dependentEntities.length;
-        this.totalResults = len;
-        EntityElement[] items = new EntityElement[len];
-        for (int i = 0; i < len; i++) {
-            Entity e = dependentEntities[i];
-            EntityElement o = new EntityElement();
-            o.type = e.getEntityType().name().toLowerCase();
-            o.name = e.getName();
-            o.status = null;
-            o.tag = getEntityTag(e, entity);
-            items[i] = o;
-        }
-        this.elements = items;
-    }
-
-    public EntityElement[] getElements() {
-        return elements;
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder buffer = new StringBuilder();
-        buffer.append(totalResults + "\n");
-        for (EntityElement element : elements) {
-            buffer.append(element.toString());
-        }
-        return buffer.toString();
-    }
-
-    private List<String> getEntityTag(Entity dependentEntity, Entity entity) {
-        List<String> tagList = new ArrayList<String>();
-
-        if (entity.getEntityType().equals(EntityType.CLUSTER)) {
-            return tagList;
-        }
-
-        Process process = null;
-        String entityNameToMatch = null;
-        if (dependentEntity.getEntityType().equals(EntityType.PROCESS)) {
-            process = (Process) dependentEntity;
-            entityNameToMatch = entity.getName();
-        } else if (dependentEntity.getEntityType().equals(EntityType.FEED)
-                && entity.getEntityType().equals(EntityType.PROCESS)) {
-            process = (Process) entity;
-            entityNameToMatch = dependentEntity.getName();
-        }
-
-        if (process != null) {
-            if (process.getInputs() != null) {
-                for (Input i : process.getInputs().getInputs()) {
-                    if (i.getFeed().equals(entityNameToMatch)) {
-                        tagList.add(INPUT_TAG);
-                    }
-                }
-            }
-            if (process.getOutputs() != null) {
-                for (Output o : process.getOutputs().getOutputs()) {
-                    if (o.getFeed().equals(entityNameToMatch)) {
-                        tagList.add(OUTPUT_TAG);
-                    }
-                }
-            }
-        }
-
-        return tagList;
-    }
-}
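
EntityList above is driven entirely by its JAXB annotations. A minimal sketch of how such an annotated class is marshalled to XML, assuming a classpath where javax.xml.bind is available (as on Java 8); TinyEntityList is an illustrative stand-in, not the real EntityList:

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;
    import javax.xml.bind.annotation.XmlElement;
    import javax.xml.bind.annotation.XmlRootElement;

    public class JaxbSketch {

        // Illustrative stand-in for a JAXB result class like EntityList.
        @XmlRootElement(name = "entities")
        public static class TinyEntityList {
            @XmlElement
            private int totalResults = 1;
            @XmlElement(name = "entity")
            private String[] entity = {"sample-feed"};
        }

        public static void main(String[] args) throws Exception {
            Marshaller m = JAXBContext.newInstance(TinyEntityList.class).createMarshaller();
            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
            // Prints <entities> with <totalResults> and <entity> children, as a REST layer would return.
            m.marshal(new TinyEntityList(), System.out);
        }
    }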

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/EntitySummaryResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/EntitySummaryResult.java b/client/src/main/java/org/apache/falcon/resource/EntitySummaryResult.java
deleted file mode 100644
index 3ebfe26..0000000
--- a/client/src/main/java/org/apache/falcon/resource/EntitySummaryResult.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Arrays;
-import java.util.Date;
-
-/**
- * Pojo for JAXB marshalling / unmarshalling.
- */
-//SUSPEND CHECKSTYLE CHECK VisibilityModifierCheck
-@XmlRootElement
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class EntitySummaryResult extends APIResult {
-
-    /**
-     * Workflow status as being set in result object.
-     */
-    public static enum WorkflowStatus {
-        WAITING, RUNNING, SUSPENDED, KILLED, FAILED, SUCCEEDED, ERROR, READY
-    }
-
-    @XmlElement
-    private EntitySummary[] entitySummaries;
-
-    //For JAXB
-    public EntitySummaryResult() {
-        super();
-    }
-
-    public EntitySummaryResult(String message, EntitySummary[] entitySummaries) {
-        this(Status.SUCCEEDED, message, entitySummaries);
-    }
-
-    public EntitySummaryResult(Status status, String message, EntitySummary[] entitySummaries) {
-        super(status, message);
-        this.entitySummaries = entitySummaries;
-    }
-
-    public EntitySummaryResult(Status status, String message) {
-        super(status, message);
-    }
-
-    public EntitySummary[] getEntitySummaries() {
-        return this.entitySummaries;
-    }
-
-    public void setEntitySummaries(EntitySummary[] entitySummaries) {
-        this.entitySummaries = entitySummaries;
-    }
-
-    /**
-     * A single entity object inside entity summary result.
-     */
-    @XmlRootElement(name = "entitySummary")
-    public static class EntitySummary {
-
-        @XmlElement
-        public String type;
-        @XmlElement
-        public String name;
-        @XmlElement
-        public String status;
-        @XmlElement
-        public String[] tags;
-        @XmlElement
-        public String[] pipelines;
-        @XmlElement
-        public Instance[] instances;
-
-        public EntitySummary() {
-        }
-
-        public EntitySummary(String entityName, String entityType) {
-            this.name = entityName;
-            this.type = entityType;
-        }
-
-        public EntitySummary(String name, String type, String status,
-                             String[] tags, String[] pipelines,
-                             Instance[] instances) {
-            this.name = name;
-            this.type = type;
-            this.status = status;
-            this.pipelines = pipelines;
-            this.tags = tags;
-            this.instances = instances;
-        }
-
-        public String getName() {
-            return this.name;
-        }
-
-        public String getType() {
-            return this.type;
-        }
-
-        public String getStatus() {
-            return this.status;
-        }
-
-        public String[] getTags() {
-            return this.tags;
-        }
-
-        public String[] getPipelines() {
-            return this.pipelines;
-        }
-
-        public Instance[] getInstances() {
-            return this.instances;
-        }
-
-        @Override
-        public String toString() {
-            return "{Entity: " + (this.name == null ? "" : this.name)
-                    + ", Type: " + (this.type == null ? "" : this.type)
-                    + ", Status: " + (this.status == null ? "" : this.status)
-                    + ", Tags: " + (this.tags == null ? "[]" : Arrays.toString(this.tags))
-                    + ", Pipelines: " + (this.pipelines == null ? "[]" : Arrays.toString(this.pipelines))
-                    + ", InstanceSummary: " + (this.instances == null ? "[]" : Arrays.toString(this.instances))
-                    +"}";
-        }
-    }
-
-    /**
-     * A single instance object inside instance result.
-     */
-    @XmlRootElement(name = "instances")
-    public static class Instance {
-        @XmlElement
-        public String instance;
-
-        @XmlElement
-        public WorkflowStatus status;
-
-        @XmlElement
-        public String logFile;
-
-        @XmlElement
-        public String cluster;
-
-        @XmlElement
-        public String sourceCluster;
-
-        @XmlElement
-        public Date startTime;
-
-        @XmlElement
-        public Date endTime;
-
-        public Instance() {
-        }
-
-        public Instance(String cluster, String instance, WorkflowStatus status) {
-            this.cluster = cluster;
-            this.instance = instance;
-            this.status = status;
-        }
-
-        public String getInstance() {
-            return instance;
-        }
-
-        public WorkflowStatus getStatus() {
-            return status;
-        }
-
-        public String getLogFile() {
-            return logFile;
-        }
-
-        public String getCluster() {
-            return cluster;
-        }
-
-        public String getSourceCluster() {
-            return sourceCluster;
-        }
-
-        public Date getStartTime() {
-            return startTime;
-        }
-
-        public Date getEndTime() {
-            return endTime;
-        }
-
-        @Override
-        public String toString() {
-            return "{instance: " + (this.instance == null ? "" : this.instance)
-                    + ", status: " + (this.status == null ? "" : this.status)
-                    + (this.logFile == null ? "" : ", log: " + this.logFile)
-                    + (this.sourceCluster == null ? "" : ", source-cluster: " + this.sourceCluster)
-                    + (this.cluster == null ? "" : ", cluster: " + this.cluster)
-                    + (this.startTime == null ? "" : ", startTime: " + this.startTime)
-                    + (this.endTime == null ? "" : ", endTime: " + this.endTime)
-                    + "}";
-        }
-    }
-}
-//RESUME CHECKSTYLE CHECK VisibilityModifierCheck

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/FeedInstanceResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/FeedInstanceResult.java b/client/src/main/java/org/apache/falcon/resource/FeedInstanceResult.java
deleted file mode 100644
index 75f0b9a..0000000
--- a/client/src/main/java/org/apache/falcon/resource/FeedInstanceResult.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import org.apache.commons.io.FileUtils;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Pojo for JAXB marshalling / unmarshalling.
- */
-//SUSPEND CHECKSTYLE CHECK VisibilityModifierCheck
-@XmlRootElement
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class FeedInstanceResult extends APIResult {
-
-    @XmlElement
-    private Instance[] instances;
-
-    private FeedInstanceResult() { // for jaxb
-        super();
-    }
-
-    public FeedInstanceResult(String message, Instance[] instances) {
-        this(Status.SUCCEEDED, message, instances);
-    }
-
-    public FeedInstanceResult(Status status, String message,
-                              Instance[] inInstances) {
-        super(status, message);
-        this.instances = inInstances;
-    }
-
-    public FeedInstanceResult(Status status, String message) {
-        super(status, message);
-    }
-
-    public Instance[] getInstances() {
-        return instances;
-    }
-
-    public void setInstances(Instance[] instances) {
-        this.instances = instances;
-    }
-
-    @Override
-    public Object[] getCollection() {
-        return getInstances();
-    }
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setInstances(new Instance[0]);
-        } else {
-            Instance[] newInstances = new Instance[items.length];
-            for (int index = 0; index < items.length; index++) {
-                newInstances[index] = (Instance)items[index];
-            }
-            setInstances(newInstances);
-        }
-    }
-
-    /**
-     * A single instance object inside instance result.
-     */
-    @XmlRootElement(name = "instance")
-    public static class Instance {
-        @XmlElement
-        public String cluster;
-
-        @XmlElement
-        public String instance;
-
-        @XmlElement
-        public String status;
-
-        @XmlElement
-        public String uri;
-
-        @XmlElement
-        public long creationTime;
-
-        @XmlElement
-        public long size;
-
-        @XmlElement
-        public String sizeH;
-
-        public Instance() {
-        }
-
-        public Instance(String cluster, String instance, String status) {
-            this.cluster = cluster;
-            this.instance = instance;
-            this.status = status;
-        }
-
-        public String getInstance() {
-            return instance;
-        }
-
-        public String getStatus() {
-            return status;
-        }
-
-        public String getUri() {
-            return uri;
-        }
-
-        public String getCluster() {
-            return cluster;
-        }
-
-        public long getCreationTime() {
-            return creationTime;
-        }
-
-        public Long getSize() {
-            return size;
-        }
-
-        public String getSizeH(){
-            return FileUtils.byteCountToDisplaySize(size);
-        }
-
-        @Override
-        public String toString() {
-            return "{instance:"
-                    + this.instance
-                    + ", status:"
-                    + this.status
-                    + (this.uri == null ? "" : ", uri: " + this.uri)
-                    + (this.cluster == null ? "" : ", cluster:" + this.cluster) + "}";
-        }
-    }
-}
-//RESUME CHECKSTYLE CHECK VisibilityModifierCheck
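
For reference, a minimal sketch of how the removed FeedInstanceResult was typically populated. It assumes the pre-FALCON-1830 falcon-client classes are still on the classpath; the cluster name, instance time, status string and path below are purely illustrative.

    import org.apache.falcon.resource.FeedInstanceResult;

    public class FeedInstanceResultDemo {
        public static void main(String[] args) {
            // One feed instance, identified by cluster, nominal time and status.
            FeedInstanceResult.Instance one =
                    new FeedInstanceResult.Instance("primary-cluster", "2016-02-01T00:00Z", "AVAILABLE");
            one.uri = "/data/feed/2016/02/01";
            one.size = 10 * 1024 * 1024;   // raw size in bytes

            // The two-argument constructor defaults the result status to SUCCEEDED.
            FeedInstanceResult result = new FeedInstanceResult(
                    "feed instance listing", new FeedInstanceResult.Instance[]{one});

            // getSizeH() renders the byte count via FileUtils.byteCountToDisplaySize.
            System.out.println(result.getInstances()[0].getSizeH());   // "10 MB"
        }
    }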

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/FeedLookupResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/FeedLookupResult.java b/client/src/main/java/org/apache/falcon/resource/FeedLookupResult.java
deleted file mode 100644
index f8d58ae..0000000
--- a/client/src/main/java/org/apache/falcon/resource/FeedLookupResult.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.v0.feed.LocationType;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Entity list used for marshalling / unmarshalling with REST calls.
- */
-@XmlRootElement(name = "feeds")
-@XmlAccessorType(XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class FeedLookupResult extends APIResult {
-
-    @XmlElement(name = "elements")
-    private FeedProperties[] elements;
-
-    //For JAXB
-    private FeedLookupResult() {
-        super();
-    }
-
-    public FeedLookupResult(Status status, String message) {
-        super(status, message);
-    }
-
-    public FeedProperties[] getElements() {
-        return elements;
-    }
-
-    public void setElements(FeedProperties[] elements) {
-        this.elements = elements;
-    }
-
-
-    @Override
-    public Object[] getCollection() {
-        return getElements();
-    }
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setElements(new FeedProperties[0]);
-        } else {
-            FeedProperties[] newInstances = new FeedProperties[items.length];
-            for (int index = 0; index < items.length; index++) {
-                newInstances[index] = (FeedProperties)items[index];
-            }
-            setElements(newInstances);
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder buffer = new StringBuilder();
-        if (elements != null) {
-            for (FeedProperties element : elements) {
-                buffer.append(element.toString());
-                buffer.append("\n");
-            }
-        }
-        return buffer.toString();
-    }
-
-    /**
-     * A single instance in the result.
-     */
-    @XmlRootElement(name = "feed")
-    @XmlAccessorType(XmlAccessType.FIELD)
-    public static class FeedProperties {
-        @XmlElement
-        private String feedName;
-
-        @XmlElement
-        private LocationType locationType;
-
-        @XmlElement
-        private String clusterName;
-
-        public FeedProperties(String feedName, LocationType locationType, String clusterName){
-            this.clusterName = clusterName;
-            this.locationType = locationType;
-            this.feedName = feedName;
-        }
-
-        //for JAXB
-        private FeedProperties(){}
-
-        public void setFeedName(String feedName) {
-            this.feedName = feedName;
-        }
-
-        public void setLocationType(LocationType locationType) {
-            this.locationType = locationType;
-        }
-
-        public void setClusterName(String clusterName) {
-            this.clusterName = clusterName;
-        }
-
-        public String getFeedName() {
-            return this.feedName;
-        }
-
-        public LocationType getLocationType() {
-            return this.locationType;
-        }
-
-        public String getClusterName() {
-            return this.clusterName;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-            FeedProperties that = (FeedProperties) o;
-            if (!StringUtils.equals(clusterName, that.clusterName)) {
-                return false;
-            }
-            if (locationType != that.locationType) {
-                return false;
-            }
-            if (!StringUtils.equals(feedName, that.feedName)) {
-                return false;
-            }
-            return true;
-        }
-
-        @Override
-        public int hashCode() {
-            int result = feedName.hashCode();
-            result = 31 * result + (locationType != null ? locationType.hashCode() : 0);
-            result = 31 * result + (clusterName != null ? clusterName.hashCode() : 0);
-            return result;
-        }
-
-        @Override
-        public String toString() {
-            return feedName + "  (CLUSTER:" + clusterName + ")  (LocationType:" + locationType.name() + ")";
-        }
-
-    }
-
-}
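
A similar sketch for the removed FeedLookupResult, which wraps FeedProperties elements. It assumes APIResult.Status is the nested status enum of the APIResult base class (not shown in this hunk); the feed, cluster and LocationType values are illustrative.

    import org.apache.falcon.entity.v0.feed.LocationType;
    import org.apache.falcon.resource.APIResult;
    import org.apache.falcon.resource.FeedLookupResult;

    public class FeedLookupResultDemo {
        public static void main(String[] args) {
            FeedLookupResult result =
                    new FeedLookupResult(APIResult.Status.SUCCEEDED, "feeds matching the given path");

            FeedLookupResult.FeedProperties props = new FeedLookupResult.FeedProperties(
                    "raw-click-logs", LocationType.DATA, "primary-cluster");
            result.setElements(new FeedLookupResult.FeedProperties[]{props});

            // toString() prints one element per line, e.g.:
            // raw-click-logs  (CLUSTER:primary-cluster)  (LocationType:DATA)
            System.out.print(result);
        }
    }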

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/InstanceDependencyResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/InstanceDependencyResult.java b/client/src/main/java/org/apache/falcon/resource/InstanceDependencyResult.java
deleted file mode 100644
index 0751f12..0000000
--- a/client/src/main/java/org/apache/falcon/resource/InstanceDependencyResult.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Entity list used for marshalling / unmarshalling with REST calls.
- */
-@XmlRootElement(name = "dependents")
-@XmlAccessorType(XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class InstanceDependencyResult extends APIResult {
-
-    @XmlElement(name = "dependencies")
-    private SchedulableEntityInstance[] dependencies;
-
-    //For JAXB
-    private InstanceDependencyResult() {
-        super();
-    }
-
-    public InstanceDependencyResult(Status status, String message) {
-        super(status, message);
-    }
-
-    public SchedulableEntityInstance[] getDependencies() {
-        return dependencies;
-    }
-
-    public void setDependencies(SchedulableEntityInstance[] dependencies) {
-        this.dependencies = dependencies;
-    }
-
-
-    @Override
-    public Object[] getCollection() {
-        return getDependencies();
-    }
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setDependencies(new SchedulableEntityInstance[0]);
-        } else {
-            SchedulableEntityInstance[] newInstances = new SchedulableEntityInstance[items.length];
-            for (int index = 0; index < items.length; index++) {
-                newInstances[index] = (SchedulableEntityInstance)items[index];
-            }
-            setDependencies(newInstances);
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder buffer = new StringBuilder();
-        if (dependencies != null) {
-            for (SchedulableEntityInstance element : dependencies) {
-                buffer.append(element.toString());
-                buffer.append("\n");
-            }
-        }
-        return buffer.toString();
-    }
-
-
-}
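
A sketch of the removed InstanceDependencyResult, populated through its generic setCollection override. It relies on SchedulableEntityInstance (removed further below in this commit) and on the assumed APIResult.Status enum; entity and cluster names are made up.

    import java.util.Date;

    import org.apache.falcon.entity.v0.EntityType;
    import org.apache.falcon.resource.APIResult;
    import org.apache.falcon.resource.InstanceDependencyResult;
    import org.apache.falcon.resource.SchedulableEntityInstance;

    public class InstanceDependencyDemo {
        public static void main(String[] args) {
            SchedulableEntityInstance dependency = new SchedulableEntityInstance(
                    "hourly-aggregation", "primary-cluster", new Date(), EntityType.PROCESS);
            dependency.setTags(SchedulableEntityInstance.INPUT);   // marks it as an input dependency

            InstanceDependencyResult result =
                    new InstanceDependencyResult(APIResult.Status.SUCCEEDED, "dependent instances");
            // setCollection copies the untyped array into a SchedulableEntityInstance[].
            result.setCollection(new Object[]{dependency});

            System.out.print(result);   // one dependency per line via toString()
        }
    }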

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/InstancesResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/InstancesResult.java b/client/src/main/java/org/apache/falcon/resource/InstancesResult.java
deleted file mode 100644
index e12c083..0000000
--- a/client/src/main/java/org/apache/falcon/resource/InstancesResult.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Date;
-
-/**
- * Pojo for JAXB marshalling / unmarshalling.
- */
-//SUSPEND CHECKSTYLE CHECK VisibilityModifierCheck
-@XmlRootElement
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class InstancesResult extends APIResult {
-
-    /**
-     * Workflow status as being set in result object.
-     */
-    public static enum WorkflowStatus {
-        WAITING, RUNNING, SUSPENDED, KILLED, FAILED, SUCCEEDED, ERROR, SKIPPED, UNDEFINED, READY
-    }
-
-    /**
-     * RestAPI supports filterBy these fields of instance.
-     */
-    public static enum InstanceFilterFields {
-        STATUS, CLUSTER, SOURCECLUSTER, STARTEDAFTER
-    }
-
-    @XmlElement
-    private Instance[] instances;
-
-    private InstancesResult() { // for jaxb
-        super();
-    }
-
-    public InstancesResult(Status status, String message) {
-        super(status, message);
-    }
-
-
-    public Instance[] getInstances() {
-        return instances;
-    }
-
-    public void setInstances(Instance[] instances) {
-        this.instances = instances;
-    }
-
-    @Override
-    public Object[] getCollection() {
-        return getInstances();
-    }
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setInstances(new Instance[0]);
-        } else {
-            Instance[] newInstances = new Instance[items.length];
-            for (int index = 0; index < items.length; index++) {
-                newInstances[index] = (Instance)items[index];
-            }
-            setInstances(newInstances);
-        }
-    }
-
-    /**
-     * A single instance object inside instance result.
-     */
-    @XmlRootElement(name = "instance")
-    public static class Instance {
-        @XmlElement
-        public String instance;
-
-        @XmlElement
-        public WorkflowStatus status;
-
-        @XmlElement
-        public String logFile;
-
-        @XmlElement
-        public String cluster;
-
-        @XmlElement
-        public String sourceCluster;
-
-        @XmlElement
-        public Date startTime;
-
-        @XmlElement
-        public Date endTime;
-
-        @XmlElement
-        public int runId;
-
-        @XmlElement
-        public String details;
-
-        @XmlElement
-        public InstanceAction[] actions;
-
-        @XmlElement(name="wfParams")
-        public KeyValuePair[] wfParams;
-
-        public Instance() {
-        }
-
-        public Instance(String cluster, String instance, WorkflowStatus status) {
-            this.cluster = cluster;
-            this.instance = instance;
-            this.status = status;
-        }
-
-        public String getInstance() {
-            return instance;
-        }
-
-        public WorkflowStatus getStatus() {
-            return status;
-        }
-
-        public String getLogFile() {
-            return logFile;
-        }
-
-        public String getCluster() {
-            return cluster;
-        }
-
-        public String getSourceCluster() {
-            return sourceCluster;
-        }
-
-        public Date getStartTime() {
-            return startTime;
-        }
-
-        public Date getEndTime() {
-            return endTime;
-        }
-
-        public int getRunId() {
-            return runId;
-        }
-
-        public InstanceAction[] getActions() {
-            return actions;
-        }
-
-        public String getDetails() {
-            return details;
-        }
-
-        public KeyValuePair[] getWfParams() { return wfParams; }
-
-        @Override
-        public String toString() {
-            return "{instance:"
-                    + this.instance
-                    + ", status:"
-                    + this.status
-                    + (this.logFile == null ? "" : ", log:" + this.logFile)
-                    + (this.sourceCluster == null ? "" : ", source-cluster:"
-                    + this.sourceCluster)
-                    + (this.cluster == null ? "" : ", cluster:"
-                    + this.cluster) + "}";
-        }
-    }
-
-    /**
-     * Instance action inside an instance object.
-     */
-    @XmlRootElement(name = "actions")
-    public static class InstanceAction {
-        @XmlElement
-        public String action;
-        @XmlElement
-        public String status;
-        @XmlElement
-        public String logFile;
-
-        public InstanceAction() {
-        }
-
-        public InstanceAction(String action, String status, String logFile) {
-            this.action = action;
-            this.status = status;
-            this.logFile = logFile;
-        }
-
-        public String getAction() {
-            return action;
-        }
-
-        public String getStatus() {
-            return status;
-        }
-
-        public String getLogFile() {
-            return logFile;
-        }
-
-        @Override
-        public String toString() {
-            return "{action:" + this.action + ", status:" + this.status
-                    + (this.logFile == null ? "" : ", log:" + this.logFile)
-                    + "}";
-        }
-    }
-
-    /**
-     * POJO for key value parameters.
-     */
-    @XmlRootElement(name = "params")
-    public static class KeyValuePair {
-        @XmlElement
-        public String key;
-        @XmlElement
-        public String value;
-
-        public KeyValuePair(String key, String value) {
-            this.key = key;
-            this.value = value;
-        }
-
-        public KeyValuePair() { }
-
-        public String getKey() {
-            return key;
-        }
-
-        public String getValue() {
-            return value;
-        }
-
-        @Override
-        public String toString() {
-            return "{key:" + this.key + ", value:" + this.value + "}";
-        }
-    }
-}
-//RESUME CHECKSTYLE CHECK VisibilityModifierCheck
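
The removed InstancesResult carries per-instance workflow state, Oozie actions and workflow parameters. A minimal sketch, again assuming the APIResult.Status enum and using invented action, log and queue names:

    import org.apache.falcon.resource.APIResult;
    import org.apache.falcon.resource.InstancesResult;

    public class InstancesResultDemo {
        public static void main(String[] args) {
            InstancesResult.Instance instance = new InstancesResult.Instance(
                    "primary-cluster", "2016-02-01T00:00Z", InstancesResult.WorkflowStatus.RUNNING);
            instance.actions = new InstancesResult.InstanceAction[]{
                new InstancesResult.InstanceAction("pig-action", "RUNNING", "oozie-action.log"),
            };
            instance.wfParams = new InstancesResult.KeyValuePair[]{
                new InstancesResult.KeyValuePair("queueName", "default"),
            };

            InstancesResult result =
                    new InstancesResult(APIResult.Status.SUCCEEDED, "running instances");
            result.setInstances(new InstancesResult.Instance[]{instance});

            // {instance:2016-02-01T00:00Z, status:RUNNING, cluster:primary-cluster}
            System.out.println(result.getInstances()[0]);
        }
    }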

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/InstancesSummaryResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/InstancesSummaryResult.java b/client/src/main/java/org/apache/falcon/resource/InstancesSummaryResult.java
deleted file mode 100644
index aa0db99..0000000
--- a/client/src/main/java/org/apache/falcon/resource/InstancesSummaryResult.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementWrapper;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Map;
-
-/**
- * Pojo for JAXB marshalling / unmarshalling.
- */
-
-//SUSPEND CHECKSTYLE CHECK VisibilityModifierCheck
-@XmlRootElement
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class InstancesSummaryResult extends APIResult {
-
-    /**
-     * RestAPI supports filterBy these fields of instanceSummary.
-     */
-    public static enum InstanceSummaryFilterFields {
-        STATUS, CLUSTER
-    }
-
-    @XmlElement
-    private InstanceSummary[] instancesSummary;
-
-    private InstancesSummaryResult() { // for jaxb
-        super();
-    }
-
-    public InstancesSummaryResult(Status status, String message) {
-        super(status, message);
-    }
-
-    public InstanceSummary[] getInstancesSummary() {
-        return instancesSummary;
-    }
-
-    public void setInstancesSummary(InstanceSummary[] instancesSummary) {
-        this.instancesSummary = instancesSummary;
-    }
-
-    @Override
-    public Object[] getCollection() {
-        return getInstancesSummary();
-    }
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setInstancesSummary(new InstanceSummary[0]);
-        } else {
-            InstanceSummary[] newInstances = new InstanceSummary[items.length];
-            for (int index = 0; index < items.length; index++) {
-                newInstances[index] = (InstanceSummary)items[index];
-            }
-            setInstancesSummary(newInstances);
-        }
-    }
-
-    /**
-     * A single instance object inside instance result.
-     */
-    @XmlRootElement(name = "instance-summary")
-    public static class InstanceSummary {
-
-        @XmlElement
-        public String cluster;
-        @XmlElementWrapper(name="map")
-        public Map<String, Long> summaryMap;
-
-        public InstanceSummary() {
-        }
-
-        public InstanceSummary(String cluster, Map<String, Long> summaryMap) {
-            this.cluster = cluster;
-            this.summaryMap = summaryMap;
-        }
-
-        public Map<String, Long> getSummaryMap() {
-            return summaryMap;
-        }
-
-        public String getCluster() {
-            return cluster;
-        }
-
-        @Override
-        public String toString() {
-            return "cluster: " + (this.cluster == null ? "" : this.cluster)
-                    + "summaryMap: " + summaryMap.toString();
-        }
-    }
-
-}
-//RESUME CHECKSTYLE CHECK VisibilityModifierCheck
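
The removed InstancesSummaryResult rolls instance states up per cluster. A short sketch with invented counts, under the same classpath and Status-enum assumptions:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.falcon.resource.APIResult;
    import org.apache.falcon.resource.InstancesSummaryResult;

    public class InstancesSummaryDemo {
        public static void main(String[] args) {
            Map<String, Long> counts = new HashMap<String, Long>();
            counts.put("SUCCEEDED", 20L);
            counts.put("RUNNING", 3L);
            counts.put("KILLED", 1L);

            InstancesSummaryResult.InstanceSummary summary =
                    new InstancesSummaryResult.InstanceSummary("primary-cluster", counts);

            InstancesSummaryResult result =
                    new InstancesSummaryResult(APIResult.Status.SUCCEEDED, "instance summary");
            result.setInstancesSummary(new InstancesSummaryResult.InstanceSummary[]{summary});

            // Prints the cluster name followed by the per-status counts.
            System.out.println(result.getInstancesSummary()[0]);
        }
    }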

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/LineageGraphResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/LineageGraphResult.java b/client/src/main/java/org/apache/falcon/resource/LineageGraphResult.java
deleted file mode 100644
index 0e10e38..0000000
--- a/client/src/main/java/org/apache/falcon/resource/LineageGraphResult.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import org.apache.commons.lang3.StringUtils;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * LineageGraphResult is the output returned by all the apis returning a DAG.
- */
-@XmlRootElement(name = "result")
-@XmlAccessorType (XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class LineageGraphResult {
-
-    private String[] vertices;
-
-    @XmlElement(name="edges")
-    private Edge[] edges;
-
-    private static final JAXBContext JAXB_CONTEXT;
-
-    static {
-        try {
-            JAXB_CONTEXT = JAXBContext.newInstance(LineageGraphResult.class);
-        } catch (JAXBException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public LineageGraphResult() {
-        // default constructor for JAXB
-    }
-
-    /**
-     * A class to represent an edge in a DAG.
-     */
-    @XmlRootElement(name = "edge")
-    @XmlAccessorType(XmlAccessType.FIELD)
-    public static class Edge {
-        @XmlElement
-        private String from;
-        @XmlElement
-        private String to;
-        @XmlElement
-        private String label;
-
-        public Edge() {
-
-        }
-
-        public Edge(String from, String to, String label) {
-            this.from = from;
-            this.to = to;
-            this.label = label;
-        }
-
-        public String getFrom() {
-            return from;
-        }
-
-        public void setFrom(String from) {
-            this.from = from;
-        }
-
-        public String getTo() {
-            return to;
-        }
-
-        public void setTo(String to) {
-            this.to = to;
-        }
-
-        public String getLabel() {
-            return label;
-        }
-
-        public void setLabel(String label) {
-            this.label = label;
-        }
-
-        public String getDotNotation() {
-            StringBuilder result = new StringBuilder();
-            if (StringUtils.isNotBlank(this.from) && StringUtils.isNotBlank(this.to)
-                    && StringUtils.isNotBlank(this.label)) {
-                result.append("\"" + this.from +"\"");
-                result.append(" -> ");
-                result.append("\"" + this.to + "\"");
-                result.append(" [ label = \"" + this.label + "\" ] \n");
-            }
-            return result.toString();
-        }
-
-        @Override
-        public String toString() {
-            return getDotNotation();
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (o == this) {
-                return true;
-            }
-
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-
-            Edge that = (Edge) o;
-            if (StringUtils.equals(that.getLabel(), label) && StringUtils.equals(that.getFrom(), from)
-                    && StringUtils.equals(that.getTo(), to)) {
-                return true;
-            }
-            return false;
-        }
-
-        @Override
-        public int hashCode() {
-            int result = from != null ? from.hashCode() : 0;
-            result = 31 * result + (to != null ? to.hashCode() : 0);
-            result = 31 * result + (label != null ? label.hashCode() : 0);
-            return result;
-        }
-
-    }
-
-
-    public String getDotNotation() {
-        StringBuilder result = new StringBuilder();
-        result.append("digraph g{ \n");
-        if (this.vertices != null) {
-            for (String v : this.vertices) {
-                result.append("\"" + v + "\"");
-                result.append("\n");
-            }
-        }
-
-        if (this.edges != null) {
-            for (Edge e : this.edges) {
-                result.append(e.getDotNotation());
-            }
-        }
-        result.append("}\n");
-        return result.toString();
-    }
-
-    public String[] getVertices() {
-        return vertices;
-    }
-
-    public void setVertices(String[] vertices) {
-        this.vertices = vertices;
-    }
-
-    public Edge[] getEdges() {
-        return edges;
-    }
-
-    public void setEdges(Edge[] edges) {
-        this.edges = edges;
-    }
-
-
-    @Override
-    public String toString() {
-        return getDotNotation();
-    }
-
-}
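
The removed LineageGraphResult renders a DAG in Graphviz DOT notation. A minimal sketch with an invented two-step lineage:

    import org.apache.falcon.resource.LineageGraphResult;

    public class LineageGraphDemo {
        public static void main(String[] args) {
            LineageGraphResult graph = new LineageGraphResult();
            graph.setVertices(new String[]{"raw-feed", "cleanse-process", "clean-feed"});
            graph.setEdges(new LineageGraphResult.Edge[]{
                new LineageGraphResult.Edge("raw-feed", "cleanse-process", "input"),
                new LineageGraphResult.Edge("cleanse-process", "clean-feed", "output"),
            });

            // Emits: digraph g{ "vertex" ... "from" -> "to" [ label = "..." ] ... }
            System.out.println(graph.getDotNotation());
        }
    }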

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstance.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstance.java b/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstance.java
deleted file mode 100644
index 0968734..0000000
--- a/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstance.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.SchemaHelper;
-
-import java.util.Date;
-
-/**
- * Instance of a Schedulable Entity (Feed/Process).
- */
-public class SchedulableEntityInstance implements Comparable<SchedulableEntityInstance> {
-
-    public static final String INPUT = "Input";
-    public static final String OUTPUT = "Output";
-
-    private String entityName;
-
-    private String cluster;
-
-    private Date instanceTime;
-
-    private EntityType entityType;
-
-    private String tags;
-
-    //for JAXB
-    private SchedulableEntityInstance() {
-
-    }
-
-    public SchedulableEntityInstance(String entityName, String cluster, Date instanceTime, EntityType type) {
-        this.entityName = entityName;
-        this.cluster = cluster;
-        this.entityType = type;
-        if (instanceTime != null) {
-            this.instanceTime = new Date(instanceTime.getTime());
-        }
-    }
-
-    public String getTags() {
-        return tags;
-    }
-
-    public void setTags(String tags) {
-        this.tags = tags;
-    }
-
-    public String getEntityName() {
-        return entityName;
-    }
-
-    public void setEntityName(String entityName) {
-        this.entityName = entityName;
-    }
-
-    public String getCluster() {
-        return cluster;
-    }
-
-    public void setCluster(String cluster) {
-        this.cluster = cluster;
-    }
-
-    public EntityType getEntityType() {
-        return entityType;
-    }
-
-    public void setEntityType(EntityType entityType) {
-        this.entityType = entityType;
-    }
-
-    public Date getInstanceTime() {
-        return new Date(instanceTime.getTime());
-    }
-
-    public void setInstanceTime(Date instanceTime) {
-        this.instanceTime = new Date(instanceTime.getTime());
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder sb = new StringBuilder();
-        sb.append("name: " + entityName
-                + ", type: " + entityType
-                + ", cluster: " + cluster
-                + ", instanceTime: " + SchemaHelper.formatDateUTC(instanceTime));
-        sb.append(", tags: " + ((tags != null) ? tags : ""));
-        return sb.toString();
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
-
-        SchedulableEntityInstance that = (SchedulableEntityInstance) o;
-
-        if (instanceTime == null ? that.instanceTime != null : !instanceTime.equals(that.instanceTime)) {
-            return false;
-        }
-
-        if (!entityType.equals(that.entityType)) {
-            return false;
-        }
-
-        if (!StringUtils.equals(entityName, that.entityName)) {
-            return false;
-        }
-
-        if (!StringUtils.equals(cluster, that.cluster)) {
-            return false;
-        }
-
-        if (!StringUtils.equals(tags, that.tags)) {
-            return false;
-        }
-
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = instanceTime.hashCode();
-        result = 31 * result + entityName.hashCode();
-        result = 31 * result + entityType.hashCode();
-        result = 31 * result + cluster.hashCode();
-        if (tags != null) {
-            result = 31 * result + tags.hashCode();
-        }
-        return result;
-    }
-
-    @Override
-    public int compareTo(SchedulableEntityInstance o) {
-        int result = this.cluster.compareTo(o.cluster);
-        if (result != 0) {
-            return result;
-        }
-
-        result = this.entityType.compareTo(o.entityType);
-        if (result != 0) {
-            return result;
-        }
-
-        result = this.entityName.compareToIgnoreCase(o.entityName);
-        if (result != 0) {
-            return result;
-        }
-
-        return this.instanceTime.compareTo(o.instanceTime);
-    }
-}
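
The removed SchedulableEntityInstance is Comparable, ordering first by cluster, then entity type, then case-insensitive entity name, and finally instance time. A sketch sorting a few invented instances:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Date;
    import java.util.List;

    import org.apache.falcon.entity.v0.EntityType;
    import org.apache.falcon.resource.SchedulableEntityInstance;

    public class InstanceOrderingDemo {
        public static void main(String[] args) {
            Date now = new Date();
            List<SchedulableEntityInstance> instances = new ArrayList<SchedulableEntityInstance>();
            instances.add(new SchedulableEntityInstance("summary-process", "beta-colo", now, EntityType.PROCESS));
            instances.add(new SchedulableEntityInstance("raw-feed", "alpha-colo", now, EntityType.FEED));
            instances.add(new SchedulableEntityInstance("Raw-Feed-2", "alpha-colo", now, EntityType.FEED));

            Collections.sort(instances);   // alpha-colo feeds first, then the beta-colo process
            for (SchedulableEntityInstance instance : instances) {
                System.out.println(instance);
            }
        }
    }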

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstanceResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstanceResult.java b/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstanceResult.java
deleted file mode 100644
index 752c48d..0000000
--- a/client/src/main/java/org/apache/falcon/resource/SchedulableEntityInstanceResult.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Arrays;
-
-/**
- * Instances list used for marshalling / unmarshalling with REST calls.
- */
-@XmlRootElement(name = "instances")
-@XmlAccessorType(XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class SchedulableEntityInstanceResult extends APIResult {
-
-    @XmlElement(name = "instances")
-    private SchedulableEntityInstance[] instances;
-
-    //For JAXB
-    private SchedulableEntityInstanceResult() {
-        super();
-    }
-
-    public SchedulableEntityInstanceResult(Status status, String message) {
-        super(status, message);
-    }
-
-    public SchedulableEntityInstance[] getInstances() {
-        return instances;
-    }
-
-    public void setInstances(SchedulableEntityInstance[] instances) {
-        this.instances = instances;
-    }
-
-
-    @Override
-    public Object[] getCollection() {
-        return getInstances();
-    }
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setInstances(new SchedulableEntityInstance[0]);
-        } else {
-            SchedulableEntityInstance[] newInstances = new SchedulableEntityInstance[items.length];
-            for (int index = 0; index < items.length; index++) {
-                newInstances[index] = (SchedulableEntityInstance)items[index];
-            }
-            setInstances(newInstances);
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder buffer = new StringBuilder();
-        if (instances != null) {
-            Arrays.sort(instances);
-            for (SchedulableEntityInstance element : instances) {
-                buffer.append(element.toString());
-                buffer.append("\n");
-            }
-        }
-        return buffer.toString();
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/resource/TriageResult.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/resource/TriageResult.java b/client/src/main/java/org/apache/falcon/resource/TriageResult.java
deleted file mode 100644
index 131e2e1..0000000
--- a/client/src/main/java/org/apache/falcon/resource/TriageResult.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.resource;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Result for instance triage.
- */
-@XmlRootElement(name = "triage")
-@XmlAccessorType(XmlAccessType.FIELD)
-@edu.umd.cs.findbugs.annotations.SuppressWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
-public class TriageResult extends APIResult {
-
-    @XmlElement(name = "triageGraphs")
-    private LineageGraphResult[] triageGraphs;
-
-    //For JAXB
-    private TriageResult() {
-        super();
-    }
-
-    public TriageResult(Status status, String message) {
-        super(status, message);
-    }
-
-
-
-    public LineageGraphResult[] getTriageGraphs() {
-        return triageGraphs;
-    }
-
-    public void setTriageGraphs(LineageGraphResult[] triageGraphs) {
-        this.triageGraphs = triageGraphs;
-    }
-
-
-    @Override
-    public Object[] getCollection() {
-        return getTriageGraphs();
-    }
-
-
-    @Override
-    public void setCollection(Object[] items) {
-        if (items == null) {
-            setTriageGraphs(new LineageGraphResult[0]);
-        } else {
-            LineageGraphResult[] graphs = new LineageGraphResult[items.length];
-            for (int index = 0; index < items.length; index++) {
-                graphs[index] = (LineageGraphResult)items[index];
-            }
-            setTriageGraphs(graphs);
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder buffer = new StringBuilder();
-        if (triageGraphs != null) {
-            for (LineageGraphResult graph : triageGraphs) {
-                buffer.append(graph.getDotNotation());
-                buffer.append("\n\n");
-            }
-        }
-        return buffer.toString();
-    }
-}
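
The removed TriageResult simply aggregates one LineageGraphResult per triaged instance; its toString() concatenates their DOT notation. A short sketch under the same classpath and Status-enum assumptions:

    import org.apache.falcon.resource.APIResult;
    import org.apache.falcon.resource.LineageGraphResult;
    import org.apache.falcon.resource.TriageResult;

    public class TriageResultDemo {
        public static void main(String[] args) {
            LineageGraphResult graph = new LineageGraphResult();
            graph.setVertices(new String[]{"late-feed", "blocked-process"});
            graph.setEdges(new LineageGraphResult.Edge[]{
                new LineageGraphResult.Edge("late-feed", "blocked-process", "input"),
            });

            TriageResult triage = new TriageResult(APIResult.Status.SUCCEEDED, "triage for one instance");
            triage.setTriageGraphs(new LineageGraphResult[]{graph});

            System.out.print(triage);   // DOT notation of every triaged graph
        }
    }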

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/resources/cluster-0.1.xsd
----------------------------------------------------------------------
diff --git a/client/src/main/resources/cluster-0.1.xsd b/client/src/main/resources/cluster-0.1.xsd
deleted file mode 100644
index 34e3689..0000000
--- a/client/src/main/resources/cluster-0.1.xsd
+++ /dev/null
@@ -1,211 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" attributeFormDefault="unqualified" elementFormDefault="qualified"
-           targetNamespace="uri:falcon:cluster:0.1" xmlns="uri:falcon:cluster:0.1"
-           xmlns:jaxb="http://java.sun.com/xml/ns/jaxb" jaxb:version="2.1">
-    <xs:annotation>
-        <xs:documentation>
-            Licensed to the Apache Software Foundation (ASF) under one or more
-            contributor license agreements. See the NOTICE file distributed with
-            this work for additional information regarding copyright ownership.
-            The ASF licenses this file to You under the Apache License, Version
-            2.0
-            (the "License"); you may not use this file except in compliance with
-            the License. You may obtain a copy of the License at
-
-            http://www.apache.org/licenses/LICENSE-2.0
-
-            Unless required by applicable law or agreed to in writing, software
-            distributed under the License is distributed on an "AS IS" BASIS,
-            WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-            implied.
-            See the License for the specific language governing permissions and
-            limitations under the License.
-        </xs:documentation>
-        <xs:appinfo>
-            <jaxb:schemaBindings>
-                <jaxb:package name="org.apache.falcon.entity.v0.cluster"/>
-            </jaxb:schemaBindings>
-        </xs:appinfo>
-    </xs:annotation>
-    <xs:element name="cluster" type="cluster">
-    </xs:element>
-    <xs:complexType name="cluster">
-        <xs:annotation>
-            <xs:documentation>The cluster contains the definition of different
-                interfaces which are used by Falcon like readonly, write, workflow and messaging.
-                A cluster is referenced by feeds and processes which are on-boarded
-                to Falcon by its name.
-                name: the name of cluster, which must be unique.
-                colo: the name of the colo to which this cluster belongs.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="KEY_VALUE_PAIR" name="tags" minOccurs="0">
-                <xs:annotation>
-                    <xs:documentation>
-                        tags: a process specifies an optional list of comma separated tags,
-                        Key Value Pairs, separated by comma,
-                        which is used for classification of processes.
-                        Example: consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="interfaces" name="interfaces"/>
-            <xs:element type="locations" name="locations"/>
-            <xs:element type="ACL" name="ACL" minOccurs="0" maxOccurs="1"/>
-            <xs:element type="properties" name="properties" minOccurs="0"/>
-        </xs:sequence>
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="description"/>
-        <xs:attribute type="xs:string" name="colo" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="locations">
-        <xs:annotation>
-            <xs:documentation>A list of locations on cluster.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="location" name="location" maxOccurs="unbounded" minOccurs="1"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="property">
-        <xs:annotation>
-            <xs:documentation>
-                A key-value pair, which are propagated to the
-                workflow engine.
-                Ideally the Mapred's job queue name (queueName) and
-                JMS impl class name of the messaging engine (brokerImplClass)
-                should be defined here.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="value" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="interface">
-        <xs:annotation>
-            <xs:documentation>
-                An interface specifies the interface type, Falcon uses it to schedule
-                entities in workflow engine, to save and read data from hadoop and to
-                publish messages to messaging engine.
-                endpoint: is the url for each interface; examples: for write it is the
-                url of hdfs (fs.defaultFS) and
-                for workflow it is url of workflow engine like oozie.
-                version: The current runtime version of each interface.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="interfacetype" name="type" use="required"/>
-        <xs:attribute type="xs:string" name="endpoint" use="required"/>
-        <xs:attribute type="xs:string" name="version" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="properties">
-        <xs:annotation>
-            <xs:documentation>
-                A list of property elements.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="property" name="property" maxOccurs="unbounded" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="location">
-        <xs:annotation>
-            <xs:documentation>
-                Location has the name and the path.
-                name: is the type of locations which can be
-                staging, temp and working only.
-                staging is a mandatory type.
-                path: the hdfs path for each location.
-                Falcon would use the location to do intermediate
-                processing of entities in hdfs and hence Falcon
-                should have read/write/execute permission on these
-                locations.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="cluster-location-type" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="path" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="interfaces">
-        <xs:annotation>
-            <xs:documentation>
-                A list of interfaces.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="interface" name="interface" maxOccurs="unbounded" minOccurs="3"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:simpleType name="interfacetype">
-        <xs:annotation>
-            <xs:documentation>
-                An interface has 6 different interface types: readonly, write,
-                execute, workflow, messaging, registry.
-                readonly specifies Hadoop's hftp address; its endpoint is the value of
-                dfs.http.address. ex: hftp://corp.namenode:50070/
-                write specifies the interface to write to hdfs; its endpoint is the value
-                of fs.defaultFS. ex: hdfs://corp.namenode:8020
-                execute specifies the interface for the job tracker; its endpoint is the value
-                of mapreduce.jobtracker.address. ex: corp.jt:8021
-                workflow specifies the interface for the workflow engine; an example of its
-                endpoint is the value of OOZIE_URL. ex: http://corp.oozie:11000/oozie
-                messaging specifies the interface for sending feed availability messages; its
-                endpoint is the broker url with a tcp address. ex: tcp://corp.messaging:61616?daemon=true
-                registry specifies the interface for Hcatalog.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="readonly"/>
-            <xs:enumeration value="write"/>
-            <xs:enumeration value="execute"/>
-            <xs:enumeration value="workflow"/>
-            <xs:enumeration value="messaging"/>
-            <xs:enumeration value="registry"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="IDENTIFIER">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(([a-zA-Z]([\-a-zA-Z0-9])*){1,39})"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="KEY_VALUE_PAIR">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="([\w_]+=[^,]+)?([,]?[ ]*[\w_]+=[^,]+)*"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="ACL">
-        <xs:annotation>
-            <xs:documentation>
-                Access control list for this cluster.
-                owner is the Owner of this entity.
-                group is the one which has access to read - not used at this time.
-                permission is not enforced at this time
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="owner"/>
-        <xs:attribute type="xs:string" name="group"/>
-        <xs:attribute type="xs:string" name="permission" default="*"/>
-    </xs:complexType>
-    <xs:simpleType name="cluster-location-type">
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="staging"/>
-            <xs:enumeration value="working"/>
-            <xs:enumeration value="temp"/>
-        </xs:restriction>
-    </xs:simpleType>
-</xs:schema>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/resources/datasource-0.1.xsd
----------------------------------------------------------------------
diff --git a/client/src/main/resources/datasource-0.1.xsd b/client/src/main/resources/datasource-0.1.xsd
deleted file mode 100644
index 1202ba1..0000000
--- a/client/src/main/resources/datasource-0.1.xsd
+++ /dev/null
@@ -1,276 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" attributeFormDefault="unqualified" elementFormDefault="qualified"
-           targetNamespace="uri:falcon:datasource:0.1" xmlns="uri:falcon:datasource:0.1"
-           xmlns:jaxb="http://java.sun.com/xml/ns/jaxb" jaxb:version="2.1">
-    <xs:annotation>
-        <xs:documentation>
-            Licensed to the Apache Software Foundation (ASF) under one or more
-            contributor license agreements. See the NOTICE file distributed with
-            this work for additional information regarding copyright ownership.
-            The ASF licenses this file to You under the Apache License, Version
-            2.0
-            (the "License"); you may not use this file except in compliance with
-            the License. You may obtain a copy of the License at
-
-            http://www.apache.org/licenses/LICENSE-2.0
-
-            Unless required by applicable law or agreed to in writing, software
-            distributed under the License is distributed on an "AS IS" BASIS,
-            WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-            implied.
-            See the License for the specific language governing permissions and
-            limitations under the License.
-        </xs:documentation>
-        <xs:appinfo>
-            <jaxb:schemaBindings>
-                <jaxb:package name="org.apache.falcon.entity.v0.datasource"/>
-            </jaxb:schemaBindings>
-        </xs:appinfo>
-    </xs:annotation>
-    <xs:element name="datasource" type="datasource">
-    </xs:element>
-    <xs:complexType name="datasource">
-        <xs:annotation>
-            <xs:documentation>The datasource contains various information required
-                to connect to a data source like a MySQL datasource or Kafka cluster.
-                A datasource is referenced by feeds that represent an object like
-                Table (or Topic) in the MySQL database (or Kafka Cluster).
-                name: the name of datasource, which must be unique.
-                colo: the name of the colo to which this datasource belongs.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="KEY_VALUE_PAIR" name="tags" minOccurs="0" maxOccurs="1">
-                <xs:annotation>
-                    <xs:documentation>
-                        tags: a process specifies an optional list of comma separated tags,
-                        Key Value Pairs, separated by comma,
-                        which is used for classification of datasource entity.
-                        Example: consumer=consumer@xyz.com, owner=producer@xyz.com, department=forecasting
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="interfaces" name="interfaces"/>
-            <xs:element type="driver" name="driver" minOccurs="1" maxOccurs="1" />
-            <xs:element type="properties" name="properties" minOccurs="0"/>
-            <xs:element type="ACL" name="ACL" minOccurs="0" maxOccurs="1"/>
-        </xs:sequence>
-        <xs:attribute type="IDENTIFIER" name="name" use="required"/>
-        <xs:attribute type="xs:string"  name="colo" use="required"/>
-        <xs:attribute type="xs:string"  name="description"/>
-        <xs:attribute type="datasource-type"  name="type" use="required">
-            <xs:annotation>
-                <xs:documentation>
-                    datasource type could be Relational Databases (MySQL, Oracle etc.), Messaging systems like
-                    Kafka, etc.
-                </xs:documentation>
-            </xs:annotation>
-        </xs:attribute>
-    </xs:complexType>
-    <xs:complexType name="property">
-        <xs:annotation>
-            <xs:documentation>
-                A key-value pair to pass in any datasource specific properties.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="name" use="required"/>
-        <xs:attribute type="xs:string" name="value" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="interface">
-        <xs:annotation>
-            <xs:documentation>
-                An interface specifies the interface type (read or write), and an
-                endpoint url. Falcon uses these endpoints to import or export
-                data from datasources.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="driver" name="driver" minOccurs="0" maxOccurs="1" />
-            <xs:element type="credential" name="credential" minOccurs="0" maxOccurs="1"/>
-            <xs:element type="properties" name="properties" minOccurs="0"/>
-        </xs:sequence>
-        <xs:attribute type="interfacetype" name="type" use="required"/>
-        <xs:attribute type="xs:string" name="endpoint" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="properties">
-        <xs:annotation>
-            <xs:documentation>
-                A list of property elements.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="property" name="property" maxOccurs="unbounded" minOccurs="0"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="interfaces">
-        <xs:annotation>
-            <xs:documentation>
-                A list of interfaces.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence>
-            <xs:element type="interface" name="interface" maxOccurs="2" minOccurs="1"/>
-            <xs:element type="credential" name="credential" minOccurs="0" maxOccurs="1"/>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:simpleType name="interfacetype">
-        <xs:annotation>
-            <xs:documentation>
-                A datasource interface has two different interface types: readonly and write.
-                The readonly endpoint specifies the url/mechanism to use for data IMPORT operations
-                from a datasource, while the write endpoint specifies the url/mechanism to use for
-                data EXPORT operations.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="readonly"/>
-            <xs:enumeration value="write"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="IDENTIFIER">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="(([a-zA-Z]([\-a-zA-Z0-9])*){1,39})"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:simpleType name="KEY_VALUE_PAIR">
-        <xs:restriction base="xs:string">
-            <xs:pattern value="([\w_]+=[^,]+)?([,]?[ ]*[\w_]+=[^,]+)*"/>
-        </xs:restriction>
-    </xs:simpleType>
-    <xs:complexType name="credential">
-        <xs:sequence  minOccurs="1" maxOccurs="1" >
-            <xs:element name="userName" minOccurs="1" maxOccurs="1" type="xs:string">
-                <xs:annotation>
-                    <xs:documentation>
-                        The User for the datasource.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-
-            <xs:choice minOccurs="1" maxOccurs="1">
-                <xs:element name="passwordFile" type="xs:string">
-                    <xs:annotation>
-                        <xs:documentation>
-                            The fully qualified path to a file on HDFS containing the
-                            datasource server password, with 400 permissions. Only the user
-                            submitting the job has read access to this file, which is
-                            securely passed to the mappers.
-                        </xs:documentation>
-                    </xs:annotation>
-                </xs:element>
-                <xs:element name="passwordText" type="xs:string">
-                    <xs:annotation>
-                        <xs:documentation>
-                            Plain text password.
-                        </xs:documentation>
-                    </xs:annotation>
-                </xs:element>
-                <xs:element name="passwordAlias" type="passwordAliasType">
-                    <xs:annotation>
-                        <xs:documentation>
-                            Password alias using hadoop credential store.
-                        </xs:documentation>
-                    </xs:annotation>
-                </xs:element>
-            </xs:choice>
-        </xs:sequence>
-        <xs:attribute name="type" type="credentialtype" use="required"/>
-    </xs:complexType>
-    <xs:complexType name="passwordAliasType">
-        <xs:sequence minOccurs="1" maxOccurs="1">
-            <xs:element name="alias" type="xs:string">
-                <xs:annotation>
-                    <xs:documentation> Provide password alias. </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element name="providerPath" type="xs:string">
-                <xs:annotation>
-                    <xs:documentation>jceks provider HDFS file path </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:simpleType name="credentialtype">
-        <xs:annotation>
-            <xs:documentation>
-                Only user-password credentials are supported today; this can be extended.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="password-file" />
-            <xs:enumeration value="password-text" />
-            <xs:enumeration value="password-alias" />
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:simpleType name="datasource-type">
-        <xs:annotation>
-            <xs:documentation>
-                The supported datasource types are mysql, oracle and hsql; this list can be extended.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:restriction base="xs:string">
-            <xs:enumeration value="mysql"/>
-            <xs:enumeration value="oracle"/>
-            <xs:enumeration value="hsql"/>
-        </xs:restriction>
-    </xs:simpleType>
-
-    <xs:complexType name="driver">
-        <xs:annotation>
-            <xs:documentation>
-                Driver information.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:sequence minOccurs="1" maxOccurs="1">
-            <xs:element type="xs:string" name="clazz" minOccurs="1" maxOccurs="1">
-                <xs:annotation>
-                    <xs:documentation>
-                        Fully qualified class name for the datasource driver used
-                        for validating the datasource connection in Falcon.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-            <xs:element type="xs:string" name="jar" minOccurs="1" maxOccurs="unbounded">
-                <xs:annotation>
-                    <xs:documentation>
-                        Path to the connector jar files on HDFS that are shipped with the workflow.
-                        The connector jar files need to be placed in the oozie sharelib; since this
-                        uses the latest features in sqoop 1.x, a 1.5 snapshot is required.
-                    </xs:documentation>
-                </xs:annotation>
-            </xs:element>
-        </xs:sequence>
-    </xs:complexType>
-    <xs:complexType name="ACL">
-        <xs:annotation>
-            <xs:documentation>
-                Access control list for this datasource.
-                owner is the owner of this entity.
-                group is the group that has read access - not used at this time.
-                permission is not enforced at this time.
-            </xs:documentation>
-        </xs:annotation>
-        <xs:attribute type="xs:string" name="owner"/>
-        <xs:attribute type="xs:string" name="group"/>
-        <xs:attribute type="xs:string" name="permission" default="*"/>
-    </xs:complexType>
-</xs:schema>
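
For reference, a minimal datasource entity that would validate against the schema above might
look like the following sketch. All names, endpoints, paths and credentials are illustrative
only, and the XML namespace declaration is omitted:

    <datasource name="mysql-west" colo="west-colo" description="Sample MySQL datasource" type="mysql">
        <tags>owner=dba@xyz.com, department=forecasting</tags>
        <interfaces>
            <interface type="readonly" endpoint="jdbc:mysql://db-host/sales">
                <credential type="password-file">
                    <userName>import_user</userName>
                    <passwordFile>/apps/falcon/secure/import_user.passwd</passwordFile>
                </credential>
            </interface>
            <interface type="write" endpoint="jdbc:mysql://db-host/sales">
                <credential type="password-alias">
                    <userName>export_user</userName>
                    <passwordAlias>
                        <alias>export_user.password</alias>
                        <providerPath>jceks://hdfs/apps/falcon/secure/creds.jceks</providerPath>
                    </passwordAlias>
                </credential>
            </interface>
        </interfaces>
        <driver>
            <clazz>com.mysql.jdbc.Driver</clazz>
            <jar>/apps/falcon/lib/mysql-connector-java.jar</jar>
        </driver>
    </datasource>

The readonly interface is used for data import and the write interface for data export, as
described in the interfacetype documentation above.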


[43/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/catalog/CatalogPartition.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/CatalogPartition.java b/common/src/main/java/org/apache/falcon/catalog/CatalogPartition.java
deleted file mode 100644
index 71194c7..0000000
--- a/common/src/main/java/org/apache/falcon/catalog/CatalogPartition.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.catalog;
-
-import java.util.List;
-
-/**
- * The CatalogPartition is a wrapper around org.apache.hive.hcatalog.api.HCatPartition.
- */
-public class CatalogPartition {
-
-    private String databaseName;
-    private String tableName;
-    private List<String> values;
-    private long createTime;
-    private long lastAccessTime;
-    private String inputFormat;
-    private String outputFormat;
-    private String location;
-    private String serdeInfo;
-    private long size = -1;
-
-    protected CatalogPartition() {
-    }
-
-    protected void setDatabaseName(String databaseName) {
-        this.databaseName = databaseName;
-    }
-
-    protected void setTableName(String tableName) {
-        this.tableName = tableName;
-    }
-
-    protected void setValues(List<String> values) {
-        this.values = values;
-    }
-
-    protected void setCreateTime(long createTime) {
-        this.createTime = createTime;
-    }
-
-    protected void setLastAccessTime(long lastAccessTime) {
-        this.lastAccessTime = lastAccessTime;
-    }
-
-    protected void setInputFormat(String inputFormat) {
-        this.inputFormat = inputFormat;
-    }
-
-    protected void setOutputFormat(String outputFormat) {
-        this.outputFormat = outputFormat;
-    }
-
-    protected void setLocation(String location) {
-        this.location = location;
-    }
-
-    protected void setSerdeInfo(String serdeInfo) {
-        this.serdeInfo = serdeInfo;
-    }
-
-    public void setSize(long size) { this.size = size; }
-
-    /**
-     * Gets the database name.
-     *
-     * @return the database name
-     */
-    public String getDatabaseName() {
-        return this.databaseName;
-    }
-
-    /**
-     * Gets the table name.
-     *
-     * @return the table name
-     */
-    public String getTableName() {
-        return this.tableName;
-    }
-
-
-    /**
-     * Gets the input format.
-     *
-     * @return the input format
-     */
-    public String getInputFormat() {
-        return this.inputFormat;
-    }
-
-    /**
-     * Gets the output format.
-     *
-     * @return the output format
-     */
-    public String getOutputFormat() {
-        return this.outputFormat;
-    }
-
-    /**
-     * Gets the location.
-     *
-     * @return the location
-     */
-    public String getLocation() {
-        return this.location;
-    }
-
-    /**
-     * Gets the serde.
-     *
-     * @return the serde
-     */
-    public String getSerDe() {
-        return this.serdeInfo;
-    }
-
-    /**
-     * Gets the last access time.
-     *
-     * @return the last access time
-     */
-    public long getLastAccessTime() {
-        return this.lastAccessTime;
-    }
-
-    /**
-     * Gets the create time.
-     *
-     * @return the create time
-     */
-    public long getCreateTime() {
-        return this.createTime;
-    }
-
-    /**
-     * Gets the values.
-     *
-     * @return the values
-     */
-    public List<String> getValues() {
-        return this.values;
-    }
-
-    /**
-     * Gets the size.
-     *
-     * @return the size
-     */
-    public long getSize() { return size; }
-
-    @Override
-    public String toString() {
-        return "CatalogPartition ["
-            + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null, ")
-            + (databaseName != null ? "dbName=" + databaseName + ", " : "dbName=null, ")
-            + (values != null ? "values=" + values + ", " : "values=null, ")
-            + "size=" + size + ", " + "createTime=" + createTime + ", lastAccessTime="
-            + lastAccessTime + "]";
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/catalog/CatalogPartitionHandler.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/CatalogPartitionHandler.java b/common/src/main/java/org/apache/falcon/catalog/CatalogPartitionHandler.java
deleted file mode 100644
index cccb4f8..0000000
--- a/common/src/main/java/org/apache/falcon/catalog/CatalogPartitionHandler.java
+++ /dev/null
@@ -1,313 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.catalog;
-
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.CatalogStorage;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.Storage;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.workflow.WorkflowExecutionContext;
-import org.apache.falcon.workflow.WorkflowExecutionListener;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.Date;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Collection;
-import java.util.Arrays;
-import java.util.TimeZone;
-import java.util.Properties;
-
-/**
- * Listens to workflow execution completion events.
- * It syncs HCat partitions based on the feeds created/evicted/replicated.
- */
-public class CatalogPartitionHandler implements WorkflowExecutionListener{
-    private static final Logger LOG = LoggerFactory.getLogger(CatalogPartitionHandler.class);
-
-    public static final ConfigurationStore STORE = ConfigurationStore.get();
-    public static final String CATALOG_TABLE = "catalog.table";
-    private ExpressionHelper evaluator = ExpressionHelper.get();
-    private static CatalogPartitionHandler catalogInstance = new CatalogPartitionHandler();
-    private static final boolean IS_CATALOG_ENABLED = CatalogServiceFactory.isEnabled();
-    public static final TimeZone UTC = TimeZone.getTimeZone("UTC");
-
-    private static final PathFilter PATH_FILTER = new PathFilter() {
-        @Override public boolean accept(Path path) {
-            try {
-                FileSystem fs = path.getFileSystem(new Configuration());
-                return !path.getName().startsWith("_") && !path.getName().startsWith(".") && !fs.isFile(path);
-            } catch (IOException e) {
-                throw new RuntimeException(e);
-            }
-        }
-    };
-
-    public static final CatalogPartitionHandler get() {
-        return catalogInstance;
-    }
-
-    @Override
-    public void onSuccess(WorkflowExecutionContext context) throws FalconException {
-        if (!IS_CATALOG_ENABLED) {
-            //Skip if catalog service is not enabled
-            return;
-        }
-
-        String[] feedNames = context.getOutputFeedNamesList();
-        String[] feedPaths = context.getOutputFeedInstancePathsList();
-        Cluster cluster = STORE.get(EntityType.CLUSTER, context.getClusterName());
-        Configuration clusterConf = ClusterHelper.getConfiguration(cluster);
-
-        if (StringUtils.isEmpty(ClusterHelper.getRegistryEndPoint(cluster))) {
-            //Skip if registry endpoint is not defined for the cluster
-            LOG.info("Catalog endpoint not defined for cluster {}. Skipping partition registration", cluster.getName());
-            return;
-        }
-
-        for (int index = 0; index < feedNames.length; index++) {
-            LOG.info("Partition handling for feed {} for path {}", feedNames[index], feedPaths[index]);
-            Feed feed = STORE.get(EntityType.FEED, feedNames[index]);
-
-            Storage storage = FeedHelper.createStorage(cluster, feed);
-            if (storage.getType() == Storage.TYPE.TABLE) {
-                //Do nothing if the feed is already table based
-                LOG.info("Feed {} is already table based. Skipping partition registration", feed.getName());
-                continue;
-            }
-
-            CatalogStorage catalogStorage = getCatalogStorageFromFeedProperties(feed, cluster, clusterConf);
-            if (catalogStorage == null) {
-                //There is no catalog defined in the feed properties. So, skip partition registration
-                LOG.info("Feed {} doesn't have table defined in its properties/table doesn't exist. "
-                        + "Skipping partition registration", feed.getName());
-                continue;
-            }
-
-            //Generate static partition values - get the date from feed path and evaluate partitions in catalog spec
-            Path feedPath = new Path(new Path(feedPaths[index]).toUri().getPath());
-
-            String templatePath = new Path(storage.getUriTemplate(LocationType.DATA)).toUri().getPath();
-            LOG.debug("Template {} catalogInstance path {}", templatePath, feedPath);
-            Date date = FeedHelper.getDate(templatePath, feedPath, UTC);
-            if (date == null) {
-                LOG.info("Feed {} catalogInstance path {} doesn't match the template {}. "
-                                + "Skipping partition registration",
-                        feed.getName(), feedPath, templatePath);
-                continue;
-            }
-
-            LOG.debug("Reference date from path {} is {}", feedPath, SchemaHelper.formatDateUTC(date));
-            ExpressionHelper.setReferenceDate(date);
-            List<String> partitionValues = new ArrayList<String>();
-            for (Map.Entry<String, String> entry : catalogStorage.getPartitions().entrySet()) {
-                LOG.debug("Evaluating partition {}", entry.getValue());
-                partitionValues.add(evaluator.evaluateFullExpression(entry.getValue(), String.class));
-            }
-
-            LOG.debug("Static partition - {}", partitionValues);
-            WorkflowExecutionContext.EntityOperations operation = context.getOperation();
-            switch (operation) {
-            case DELETE:
-                dropPartitions(clusterConf, catalogStorage, partitionValues);
-                break;
-
-            case GENERATE:
-            case REPLICATE:
-                registerPartitions(clusterConf, catalogStorage, feedPath, partitionValues);
-                break;
-
-            default:
-                throw new FalconException("Unhandled operation " + operation);
-            }
-        }
-    }
-
-    //Register additional partitions. Compare the expected partitions and the existing partitions
-    //1.exist (intersection) expected --> partition already exists, so update partition
-    //2.exist - expected --> partition is not required anymore, so drop partition
-    //3.expected - exist --> partition doesn't exist, so add partition
-    private void registerPartitions(Configuration conf, CatalogStorage storage, Path staticPath,
-                                    List<String> staticPartition) throws FalconException {
-        try {
-            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(conf);
-            if (!fs.exists(staticPath)) {
-                //Do nothing if the output path doesn't exist
-                return;
-            }
-
-            List<String> partitionColumns = getPartitionColumns(conf, storage);
-            int dynamicPartCols = partitionColumns.size() - staticPartition.size();
-            Path searchPath = staticPath;
-            if (dynamicPartCols > 0) {
-                searchPath = new Path(staticPath, StringUtils.repeat("*", "/", dynamicPartCols));
-            }
-
-            //Figure out the dynamic partitions from the directories on hdfs
-            FileStatus[] files = fs.globStatus(searchPath, PATH_FILTER);
-            Map<List<String>, String> partitions = new HashMap<List<String>, String>();
-            for (FileStatus file : files) {
-                List<String> dynamicParts = getDynamicPartitions(file.getPath(), staticPath);
-                List<String> partitionValues = new ArrayList<String>(staticPartition);
-                partitionValues.addAll(dynamicParts);
-                LOG.debug("Final partition - " + partitionValues);
-                partitions.put(partitionValues, file.getPath().toString());
-            }
-
-            List<List<String>> existPartitions = listPartitions(conf, storage, staticPartition);
-            Collection<List<String>> targetPartitions = partitions.keySet();
-
-            Collection<List<String>> partitionsForDrop = CollectionUtils.subtract(existPartitions, targetPartitions);
-            Collection<List<String>> partitionsForAdd = CollectionUtils.subtract(targetPartitions, existPartitions);
-            Collection<List<String>> partitionsForUpdate =
-                    CollectionUtils.intersection(existPartitions, targetPartitions);
-
-            for (List<String> partition : partitionsForDrop) {
-                dropPartitions(conf, storage, partition);
-            }
-
-            for (List<String> partition : partitionsForAdd) {
-                addPartition(conf, storage, partition, partitions.get(partition));
-            }
-
-            for (List<String> partition : partitionsForUpdate) {
-                updatePartition(conf, storage, partition, partitions.get(partition));
-            }
-        } catch(IOException e) {
-            throw new FalconException(e);
-        }
-    }
-
-    private void updatePartition(Configuration conf, CatalogStorage storage, List<String> partition, String location)
-        throws FalconException {
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        catalogService.updatePartition(conf, storage.getCatalogUrl(), storage.getDatabase(), storage.getTable(),
-                partition, location);
-    }
-
-    private void addPartition(Configuration conf, CatalogStorage storage, List<String> partition, String location)
-        throws FalconException {
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        catalogService.addPartition(conf, storage.getCatalogUrl(), storage.getDatabase(), storage.getTable(), partition,
-                location);
-    }
-
-    private List<List<String>> listPartitions(Configuration conf, CatalogStorage storage, List<String> staticPartitions)
-        throws FalconException {
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        List<CatalogPartition> partitions = catalogService.listPartitions(conf, storage.getCatalogUrl(),
-                storage.getDatabase(), storage.getTable(), staticPartitions);
-        List<List<String>> existPartitions = new ArrayList<List<String>>();
-        for (CatalogPartition partition : partitions) {
-            existPartitions.add(partition.getValues());
-        }
-        return existPartitions;
-    }
-
-    //Returns the dynamic partitions of the data path
-    protected List<String> getDynamicPartitions(Path path, Path staticPath) {
-        String dynPart = path.toUri().getPath().substring(staticPath.toString().length());
-        dynPart = StringUtils.removeStart(dynPart, "/");
-        dynPart = StringUtils.removeEnd(dynPart, "/");
-        if (StringUtils.isEmpty(dynPart)) {
-            return new ArrayList<String>();
-        }
-        return Arrays.asList(dynPart.split("/"));
-    }
-
-    private List<String> getPartitionColumns(Configuration conf, CatalogStorage storage) throws FalconException {
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        return catalogService.getPartitionColumns(conf, storage.getCatalogUrl(), storage.getDatabase(),
-                storage.getTable());
-    }
-
-    private void dropPartitions(Configuration conf, CatalogStorage storage, List<String> values)
-        throws FalconException {
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        catalogService.dropPartitions(conf, storage.getCatalogUrl(), storage.getDatabase(),
-                storage.getTable(), values, false);
-    }
-
-    //Get the catalog template from feed properties as feed is filesystem based
-    protected CatalogStorage getCatalogStorageFromFeedProperties(Feed feed, Cluster cluster, Configuration conf)
-        throws FalconException {
-        Properties properties = FeedHelper.getFeedProperties(feed);
-        String tableUri = properties.getProperty(CATALOG_TABLE);
-        if (tableUri == null) {
-            return null;
-        }
-
-        CatalogTable table = new CatalogTable();
-        table.setUri(tableUri.replace("{", "${"));
-        CatalogStorage storage = null;
-        try {
-            storage = new CatalogStorage(cluster, table);
-        } catch (URISyntaxException e) {
-            throw new FalconException(e);
-        }
-
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        if (!catalogService.tableExists(conf, storage.getCatalogUrl(), storage.getDatabase(), storage.getTable())) {
-            return null;
-        }
-        return storage;
-    }
-
-    @Override
-    public void onFailure(WorkflowExecutionContext context) throws FalconException {
-        //no-op
-    }
-
-    @Override
-    public void onStart(WorkflowExecutionContext context) throws FalconException {
-        // Do nothing
-    }
-
-    @Override
-    public void onSuspend(WorkflowExecutionContext context) throws FalconException {
-        // Do nothing
-    }
-
-    @Override
-    public void onWait(WorkflowExecutionContext context) throws FalconException {
-        // Do nothing
-    }
-}
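
As a hedged illustration of how CatalogPartitionHandler above is driven: a filesystem-based feed
opts in to HCatalog partition registration by carrying a table URI in its properties under the
"catalog.table" key. The database, table and partition spec below are hypothetical:

    <properties>
        <property name="catalog.table" value="catalog:falcon_db:summary_table#ds={YEAR}-{MONTH}-{DAY}"/>
    </properties>

The handler rewrites {..} to ${..} before evaluating each partition expression against the
instance date; the evaluated values form the static partition, any additional partition columns
are treated as dynamic partitions discovered from the instance path, and the existing and
expected partition sets are then reconciled (drop / add / update) as in registerPartitions.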

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/catalog/CatalogServiceFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/CatalogServiceFactory.java b/common/src/main/java/org/apache/falcon/catalog/CatalogServiceFactory.java
deleted file mode 100644
index 77e6851..0000000
--- a/common/src/main/java/org/apache/falcon/catalog/CatalogServiceFactory.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.catalog;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.util.StartupProperties;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory for providing appropriate catalog service
- * implementation to the falcon service.
- */
-@SuppressWarnings("unchecked")
-public final class CatalogServiceFactory {
-    private static final Logger LOG = LoggerFactory.getLogger(CatalogServiceFactory.class);
-
-    public static final String CATALOG_SERVICE = "catalog.service.impl";
-
-    private CatalogServiceFactory() {
-    }
-
-    public static boolean isEnabled() {
-        boolean isEnabled = StartupProperties.get().containsKey(CATALOG_SERVICE);
-        if (!isEnabled) {
-            LOG.info("Catalog service disabled. Partitions will not registered");
-        }
-        return isEnabled;
-    }
-
-    public static AbstractCatalogService getCatalogService() throws FalconException {
-        if (!isEnabled()) {
-            throw new FalconException(
-                "Catalog integration is not enabled in falcon. Implementation is missing: " + CATALOG_SERVICE);
-        }
-
-        return ReflectionUtils.getInstance(CATALOG_SERVICE);
-    }
-}
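
The factory only checks whether the startup property named by CATALOG_SERVICE is set. A typical
entry in startup.properties enabling the Hive-backed implementation below, shown here as an
assumed example using Falcon's usual *.-prefixed property convention, would be:

    *.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService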

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java b/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
deleted file mode 100644
index 872f91f..0000000
--- a/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
+++ /dev/null
@@ -1,425 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.catalog;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.security.SecurityUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.io.Text;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * An implementation of CatalogService that uses Hive Meta Store (HCatalog)
- * as the backing Catalog registry.
- */
-public class HiveCatalogService extends AbstractCatalogService {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HiveCatalogService.class);
-    public static final String CREATE_TIME = "falcon.create_time";
-    public static final String UPDATE_TIME = "falcon.update_time";
-    public static final String PARTITION_DOES_NOT_EXIST = "Partition does not exist";
-
-
-    public static HiveConf createHiveConf(Configuration conf,
-                                           String metastoreUrl) throws IOException {
-        HiveConf hcatConf = new HiveConf(conf, HiveConf.class);
-
-        hcatConf.set("hive.metastore.local", "false");
-        hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUrl);
-        hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-        hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
-                HCatSemanticAnalyzer.class.getName());
-        hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-
-        hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-        hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-        return hcatConf;
-    }
-
-    /**
-     * This is used from within an oozie job.
-     *
-     * @param conf conf object
-     * @param metastoreUrl metastore uri
-     * @return hive metastore client handle
-     * @throws FalconException
-     */
-    private static HiveMetaStoreClient createClient(Configuration conf,
-                                                    String metastoreUrl) throws FalconException {
-        try {
-            LOG.info("Creating HCatalog client object for metastore {} using conf {}",
-                metastoreUrl, conf.toString());
-            final Credentials credentials = getCredentials(conf);
-            Configuration jobConf = credentials != null ? copyCredentialsToConf(conf, credentials) : conf;
-            HiveConf hcatConf = createHiveConf(jobConf, metastoreUrl);
-
-            if (UserGroupInformation.isSecurityEnabled()) {
-                hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname,
-                    conf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));
-                hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
-
-                UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-                ugi.addCredentials(credentials); // credentials cannot be null
-            }
-
-            return new HiveMetaStoreClient(hcatConf);
-        } catch (Exception e) {
-            throw new FalconException("Exception creating HiveMetaStoreClient: " + e.getMessage(), e);
-        }
-    }
-
-    private static JobConf copyCredentialsToConf(Configuration conf, Credentials credentials) {
-        JobConf jobConf = new JobConf(conf);
-        jobConf.setCredentials(credentials);
-        return jobConf;
-    }
-
-    private static Credentials getCredentials(Configuration conf) throws IOException {
-        final String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
-        if (tokenFile == null) {
-            return null;
-        }
-
-        try {
-            LOG.info("Adding credentials/delegation tokens from token file={} to conf", tokenFile);
-            Credentials credentials = Credentials.readTokenStorageFile(new File(tokenFile), conf);
-            LOG.info("credentials numberOfTokens={}, numberOfSecretKeys={}",
-                credentials.numberOfTokens(), credentials.numberOfSecretKeys());
-            return credentials;
-        } catch (IOException e) {
-            LOG.warn("error while fetching credentials from {}", tokenFile);
-        }
-
-        return null;
-    }
-
-    /**
-     * This is used from within the falcon namespace.
-     *
-     * @param conf                      conf
-     * @param catalogUrl                metastore uri
-     * @return hive metastore client handle
-     * @throws FalconException
-     */
-    private static HiveMetaStoreClient createProxiedClient(Configuration conf,
-                                                           String catalogUrl) throws FalconException {
-
-        try {
-            final HiveConf hcatConf = createHiveConf(conf, catalogUrl);
-            UserGroupInformation proxyUGI = CurrentUser.getProxyUGI();
-            addSecureCredentialsAndToken(conf, hcatConf, proxyUGI);
-
-            LOG.info("Creating HCatalog client object for {}", catalogUrl);
-            return proxyUGI.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
-                public HiveMetaStoreClient run() throws Exception {
-                    return new HiveMetaStoreClient(hcatConf);
-                }
-            });
-        } catch (Exception e) {
-            throw new FalconException("Exception creating Proxied HiveMetaStoreClient: " + e.getMessage(), e);
-        }
-    }
-
-    private static void addSecureCredentialsAndToken(Configuration conf,
-                                                     HiveConf hcatConf,
-                                                     UserGroupInformation proxyUGI) throws IOException {
-        if (UserGroupInformation.isSecurityEnabled()) {
-            String metaStoreServicePrincipal = conf.get(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
-            hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname,
-                metaStoreServicePrincipal);
-            hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
-
-            Token<DelegationTokenIdentifier> delegationTokenId = getDelegationToken(
-                hcatConf, metaStoreServicePrincipal);
-            proxyUGI.addToken(delegationTokenId);
-        }
-    }
-
-    private static Token<DelegationTokenIdentifier> getDelegationToken(HiveConf hcatConf,
-                                                                       String metaStoreServicePrincipal)
-        throws IOException {
-
-        LOG.debug("Creating delegation tokens for principal={}", metaStoreServicePrincipal);
-        HCatClient hcatClient = HCatClient.create(hcatConf);
-        String delegationToken = hcatClient.getDelegationToken(
-                CurrentUser.getUser(), metaStoreServicePrincipal);
-        hcatConf.set("hive.metastore.token.signature", "FalconService");
-
-        Token<DelegationTokenIdentifier> delegationTokenId = new Token<DelegationTokenIdentifier>();
-        delegationTokenId.decodeFromUrlString(delegationToken);
-        delegationTokenId.setService(new Text("FalconService"));
-        LOG.info("Created delegation token={}", delegationToken);
-        return delegationTokenId;
-    }
-
-    @Override
-    public boolean isAlive(Configuration conf, final String catalogUrl) throws FalconException {
-        LOG.info("Checking if the service is alive for: {}", catalogUrl);
-
-        try {
-            HiveMetaStoreClient client = createProxiedClient(conf, catalogUrl);
-            Database database = client.getDatabase("default");
-            return database != null;
-        } catch (Exception e) {
-            throw new FalconException("Exception checking if the service is alive:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public boolean tableExists(Configuration conf, final String catalogUrl, final String database,
-                               final String tableName) throws FalconException {
-        LOG.info("Checking if the table exists: {}", tableName);
-
-        try {
-            HiveMetaStoreClient client = createProxiedClient(conf, catalogUrl);
-            Table table = client.getTable(database, tableName);
-            return table != null;
-        } catch (NoSuchObjectException e) {
-            return false;
-        } catch (Exception e) {
-            throw new FalconException("Exception checking if the table exists:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public boolean isTableExternal(Configuration conf, String catalogUrl, String database,
-                                   String tableName) throws FalconException {
-        LOG.info("Checking if the table is external: {}", tableName);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            Table table = client.getTable(database, tableName);
-            return table.getTableType().equals(TableType.EXTERNAL_TABLE.name());
-        } catch (Exception e) {
-            throw new FalconException("Exception checking if the table is external:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public List<CatalogPartition> listPartitions(Configuration conf, String catalogUrl,
-                                                         String database, String tableName,
-                                                         List<String> values) throws FalconException {
-        LOG.info("List partitions for: {}, partition filter: {}", tableName, values);
-
-        try {
-            List<CatalogPartition> catalogPartitionList = new ArrayList<CatalogPartition>();
-
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            List<Partition> hCatPartitions = client.listPartitions(database, tableName, values, (short) -1);
-            for (Partition hCatPartition : hCatPartitions) {
-                LOG.debug("Partition: " + hCatPartition.getValues());
-                CatalogPartition partition = createCatalogPartition(hCatPartition);
-                catalogPartitionList.add(partition);
-            }
-
-            return catalogPartitionList;
-        } catch (Exception e) {
-            throw new FalconException("Exception listing partitions:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public List<CatalogPartition> listPartitionsByFilter(Configuration conf, String catalogUrl,
-                                                         String database, String tableName,
-                                                         String filter) throws FalconException {
-        LOG.info("List partitions for: {}, partition filter: {}", tableName, filter);
-
-        try {
-            List<CatalogPartition> catalogPartitionList = new ArrayList<CatalogPartition>();
-
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            List<Partition> hCatPartitions = client.listPartitionsByFilter(database, tableName, filter, (short) -1);
-            for (Partition hCatPartition : hCatPartitions) {
-                LOG.info("Partition: " + hCatPartition.getValues());
-                CatalogPartition partition = createCatalogPartition(hCatPartition);
-                catalogPartitionList.add(partition);
-            }
-
-            return catalogPartitionList;
-        } catch (Exception e) {
-            throw new FalconException("Exception listing partitions:" + e.getMessage(), e);
-        }
-    }
-
-    private CatalogPartition createCatalogPartition(Partition hCatPartition) {
-        final CatalogPartition catalogPartition = new CatalogPartition();
-        catalogPartition.setDatabaseName(hCatPartition.getDbName());
-        catalogPartition.setTableName(hCatPartition.getTableName());
-        catalogPartition.setValues(hCatPartition.getValues());
-        catalogPartition.setInputFormat(hCatPartition.getSd().getInputFormat());
-        catalogPartition.setOutputFormat(hCatPartition.getSd().getOutputFormat());
-        catalogPartition.setLocation(hCatPartition.getSd().getLocation());
-        catalogPartition.setSerdeInfo(hCatPartition.getSd().getSerdeInfo().getSerializationLib());
-        catalogPartition.setCreateTime(hCatPartition.getCreateTime());
-        catalogPartition.setLastAccessTime(hCatPartition.getLastAccessTime());
-        Map<String, String> params = hCatPartition.getParameters();
-        if (params != null) {
-            String size = hCatPartition.getParameters().get("totalSize");
-            if (StringUtils.isNotBlank(size)) {
-                catalogPartition.setSize(Long.parseLong(size));
-            }
-        }
-        return catalogPartition;
-    }
-
-    //Drop single partition
-    @Override
-    public boolean dropPartition(Configuration conf, String catalogUrl,
-                                  String database, String tableName,
-                                  List<String> partitionValues, boolean deleteData) throws FalconException {
-        LOG.info("Dropping partition for: {}, partition: {}", tableName, partitionValues);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            return client.dropPartition(database, tableName, partitionValues, deleteData);
-        } catch (Exception e) {
-            throw new FalconException("Exception dropping partitions:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public void dropPartitions(Configuration conf, String catalogUrl,
-                               String database, String tableName,
-                               List<String> partitionValues, boolean deleteData) throws FalconException {
-        LOG.info("Dropping partitions for: {}, partitions: {}", tableName, partitionValues);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            List<Partition> partitions = client.listPartitions(database, tableName, partitionValues, (short) -1);
-            for (Partition part : partitions) {
-                LOG.info("Dropping partition for: {}, partition: {}", tableName, part.getValues());
-                client.dropPartition(database, tableName, part.getValues(), deleteData);
-            }
-        } catch (Exception e) {
-            throw new FalconException("Exception dropping partitions:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public CatalogPartition getPartition(Configuration conf, String catalogUrl,
-                                         String database, String tableName,
-                                         List<String> partitionValues) throws FalconException {
-        LOG.info("Fetch partition for: {}, partition spec: {}", tableName, partitionValues);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            Partition hCatPartition = client.getPartition(database, tableName, partitionValues);
-            return createCatalogPartition(hCatPartition);
-        } catch (NoSuchObjectException nsoe) {
-            throw new FalconException(PARTITION_DOES_NOT_EXIST + ":" + nsoe.getMessage(), nsoe);
-        } catch (Exception e) {
-            throw new FalconException("Exception fetching partition:" + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public List<String> getPartitionColumns(Configuration conf, String catalogUrl, String database,
-                                            String tableName) throws FalconException {
-        LOG.info("Fetching partition columns of table: " + tableName);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            Table table = client.getTable(database, tableName);
-            List<String> partCols = new ArrayList<String>();
-            for (FieldSchema part : table.getPartitionKeys()) {
-                partCols.add(part.getName());
-            }
-            return partCols;
-        } catch (Exception e) {
-            throw new FalconException("Exception fetching partition columns: " + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public void addPartition(Configuration conf, String catalogUrl, String database,
-                             String tableName, List<String> partValues, String location) throws FalconException {
-        LOG.info("Adding partition {} for {}.{} with location {}", partValues, database, tableName, location);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            Table table = client.getTable(database, tableName);
-            org.apache.hadoop.hive.metastore.api.Partition part = new org.apache.hadoop.hive.metastore.api.Partition();
-            part.setDbName(database);
-            part.setTableName(tableName);
-            part.setValues(partValues);
-            part.setSd(table.getSd());
-            part.getSd().setLocation(location);
-            part.setParameters(table.getParameters());
-            if (part.getParameters() == null) {
-                part.setParameters(new HashMap<String, String>());
-            }
-            part.getParameters().put(CREATE_TIME, String.valueOf(System.currentTimeMillis()));
-            client.add_partition(part);
-
-        } catch (Exception e) {
-            throw new FalconException("Exception adding partition: " + e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public void updatePartition(Configuration conf, String catalogUrl, String database,
-                             String tableName, List<String> partValues, String location) throws FalconException {
-        LOG.info("Updating partition {} of {}.{} with location {}", partValues, database, tableName, location);
-
-        try {
-            HiveMetaStoreClient client = createClient(conf, catalogUrl);
-            Table table = client.getTable(database, tableName);
-            org.apache.hadoop.hive.metastore.api.Partition part = new org.apache.hadoop.hive.metastore.api.Partition();
-            part.setDbName(database);
-            part.setTableName(tableName);
-            part.setValues(partValues);
-            part.setSd(table.getSd());
-            part.getSd().setLocation(location);
-            part.setParameters(table.getParameters());
-            if (part.getParameters() == null) {
-                part.setParameters(new HashMap<String, String>());
-            }
-            part.getParameters().put(UPDATE_TIME, String.valueOf(System.currentTimeMillis()));
-            client.alter_partition(database, tableName, part);
-        } catch (Exception e) {
-            throw new FalconException("Exception updating partition: " + e.getMessage(), e);
-        }
-    }
-}
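
A minimal sketch of how a caller (such as CatalogPartitionHandler above) exercises this service
through the factory; the metastore URI, database and table names are placeholders:

    import org.apache.falcon.FalconException;
    import org.apache.falcon.catalog.AbstractCatalogService;
    import org.apache.falcon.catalog.CatalogPartition;
    import org.apache.falcon.catalog.CatalogServiceFactory;
    import org.apache.hadoop.conf.Configuration;

    import java.util.Arrays;
    import java.util.List;

    public final class CatalogLookupSketch {
        private CatalogLookupSketch() {}

        public static void main(String[] args) throws FalconException {
            Configuration conf = new Configuration();
            String catalogUrl = "thrift://metastore-host:9083";   // placeholder metastore URI

            // Resolves to HiveCatalogService when catalog.service.impl is configured
            AbstractCatalogService catalog = CatalogServiceFactory.getCatalogService();

            if (catalog.tableExists(conf, catalogUrl, "falcon_db", "summary_table")) {
                // List partitions matching a partial (static) partition specification
                List<CatalogPartition> partitions = catalog.listPartitions(
                        conf, catalogUrl, "falcon_db", "summary_table", Arrays.asList("2016-03-01"));
                for (CatalogPartition partition : partitions) {
                    System.out.println(partition.getLocation() + " -> " + partition.getValues());
                }
            }
        }
    }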

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/cleanup/AbstractCleanupHandler.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/cleanup/AbstractCleanupHandler.java b/common/src/main/java/org/apache/falcon/cleanup/AbstractCleanupHandler.java
deleted file mode 100644
index 85d7263..0000000
--- a/common/src/main/java/org/apache/falcon/cleanup/AbstractCleanupHandler.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.cleanup;
-
-import org.apache.commons.el.ExpressionEvaluatorImpl;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.util.DeploymentUtil;
-import org.apache.falcon.util.RuntimeProperties;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.jsp.el.ELException;
-import javax.servlet.jsp.el.ExpressionEvaluator;
-import java.io.IOException;
-
-/**
- * Falcon cleanup handler for cleaning up work, temp and log files
- * left behind by falcon.
- */
-public abstract class AbstractCleanupHandler {
-
-    protected static final Logger LOG = LoggerFactory.getLogger(AbstractCleanupHandler.class);
-
-    protected static final ConfigurationStore STORE = ConfigurationStore.get();
-    public static final ExpressionEvaluator EVALUATOR = new ExpressionEvaluatorImpl();
-    public static final ExpressionHelper RESOLVER = ExpressionHelper.get();
-
-    protected long getRetention(Entity entity, TimeUnit timeUnit)
-        throws FalconException {
-
-        String retention = getRetentionValue(timeUnit);
-        try {
-            return (Long) EVALUATOR.evaluate("${" + retention + "}",
-                    Long.class, RESOLVER, RESOLVER);
-        } catch (ELException e) {
-            throw new FalconException("Unable to evalue retention limit: "
-                    + retention + " for entity: " + entity.getName());
-        }
-    }
-
-    private String getRetentionValue(Frequency.TimeUnit timeunit) {
-        String defaultValue;
-        switch (timeunit) {
-        case minutes:
-            defaultValue = "hours(24)";
-            break;
-
-        case hours:
-            defaultValue = "days(3)";
-            break;
-
-        case days:
-            defaultValue = "days(12)";
-            break;
-
-        case months:
-            defaultValue = "months(3)";
-            break;
-
-        default:
-            defaultValue = "days(1)";
-        }
-        return RuntimeProperties.get().getProperty("log.cleanup.frequency." + timeunit + ".retention", defaultValue);
-    }
-
-    protected FileStatus[] getAllLogs(FileSystem fs, Cluster cluster,
-                                      Entity entity) throws FalconException {
-        FileStatus[] paths;
-        try {
-            Path logPath = getLogPath(cluster, entity);
-            paths = fs.globStatus(logPath);
-        } catch (IOException e) {
-            throw new FalconException(e);
-        }
-
-        return paths;
-    }
-
-    private Path getLogPath(Cluster cluster, Entity entity) {
-        // logsPath = base log path + relative path
-        return new Path(EntityUtil.getLogPath(cluster, entity), getRelativeLogPath());
-    }
-
-    private FileSystem getFileSystemAsEntityOwner(Cluster cluster,
-                                                  Entity entity) throws FalconException {
-        try {
-            final AccessControlList acl = entity.getACL();
-            // To support backward compatibility, will only use the ACL owner only if present
-            if (acl != null) {
-                CurrentUser.authenticate(acl.getOwner()); // proxy user
-            }
-
-            return HadoopClientFactory.get().createProxiedFileSystem(
-                    ClusterHelper.getConfiguration(cluster));
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    protected void delete(String clusterName, Entity entity, long retention) throws FalconException {
-        Cluster currentCluster = STORE.get(EntityType.CLUSTER, clusterName);
-        if (!isClusterInCurrentColo(currentCluster.getColo())) {
-            LOG.info("Ignoring cleanup for {}: {} in cluster: {} as this does not belong to current colo",
-                    entity.getEntityType(), entity.getName(), clusterName);
-            return;
-        }
-
-        LOG.info("Cleaning up logs for {}: {} in cluster: {} with retention: {}",
-                entity.getEntityType(), entity.getName(), clusterName, retention);
-
-        FileSystem fs = getFileSystemAsEntityOwner(currentCluster, entity);
-        FileStatus[] logs = getAllLogs(fs, currentCluster, entity);
-        deleteInternal(fs, currentCluster, entity, retention, logs);
-    }
-
-    private void deleteInternal(FileSystem fs, Cluster cluster, Entity entity,
-                                long retention, FileStatus[] logs) throws FalconException {
-        if (logs == null || logs.length == 0) {
-            LOG.info("Nothing to delete for cluster: {}, entity: {}", cluster.getName(),
-                    entity.getName());
-            return;
-        }
-
-        long now = System.currentTimeMillis();
-
-        for (FileStatus log : logs) {
-            if (now - log.getModificationTime() > retention) {
-                try {
-                    boolean isDeleted = fs.delete(log.getPath(), true);
-                    if (isDeleted) {
-                        LOG.info("Deleted path: {}", log.getPath());
-                    } else {
-                        LOG.error("Unable to delete path: {}", log.getPath());
-                    }
-                    deleteParentIfEmpty(fs, log.getPath().getParent());
-                } catch (IOException e) {
-                    throw new FalconException(" Unable to delete log file : "
-                            + log.getPath() + " for entity " + entity.getName()
-                            + " for cluster: " + cluster.getName(), e);
-                }
-            } else {
-                LOG.info("Retention limit: {} is less than modification {} for path: {}", retention,
-                        (now - log.getModificationTime()), log.getPath());
-            }
-        }
-    }
-
-    private void deleteParentIfEmpty(FileSystem fs, Path parent) throws IOException {
-        FileStatus[] files = fs.listStatus(parent);
-        if (files != null && files.length == 0) {
-            LOG.info("Parent path: {} is empty, deleting path", parent);
-            fs.delete(parent, true);
-            deleteParentIfEmpty(fs, parent.getParent());
-        }
-    }
-
-    public abstract void cleanup() throws FalconException;
-
-    protected abstract String getRelativeLogPath();
-
-    protected boolean isClusterInCurrentColo(String colo) {
-        final String currentColo = StartupProperties.get().getProperty("current.colo", "default");
-        return DeploymentUtil.isEmbeddedMode() || currentColo.equals(colo);
-    }
-}
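
A quick standalone reference (not part of the deleted sources): the sketch below mirrors the retention defaults in getRetentionValue() above and prints the runtime-property key consulted for each frequency unit. The class name and the printed lines are illustrative only; the property-name pattern and the default values come from the handler code.

/** Illustrative sketch of the log-cleanup retention defaults; not Falcon code. */
public final class CleanupRetentionDefaults {

    enum TimeUnit { minutes, hours, days, months }

    static String defaultRetention(TimeUnit unit) {
        switch (unit) {
        case minutes: return "hours(24)";
        case hours:   return "days(3)";
        case days:    return "days(12)";
        case months:  return "months(3)";
        default:      return "days(1)";
        }
    }

    public static void main(String[] args) {
        for (TimeUnit unit : TimeUnit.values()) {
            // Key looked up via RuntimeProperties before falling back to the default above.
            System.out.println("log.cleanup.frequency." + unit + ".retention (default: "
                    + defaultRetention(unit) + ")");
        }
    }
}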

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/cleanup/FeedCleanupHandler.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/cleanup/FeedCleanupHandler.java b/common/src/main/java/org/apache/falcon/cleanup/FeedCleanupHandler.java
deleted file mode 100644
index 16db7d8..0000000
--- a/common/src/main/java/org/apache/falcon/cleanup/FeedCleanupHandler.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.cleanup;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Feed;
-
-import java.util.Collection;
-
-/**
- * Cleans up files relating to feed management workflows.
- */
-public class FeedCleanupHandler extends AbstractCleanupHandler {
-
-    @Override
-    public void cleanup() throws FalconException {
-        Collection<String> feeds = STORE.getEntities(EntityType.FEED);
-        for (String feedName : feeds) {
-            Feed feed = STORE.get(EntityType.FEED, feedName);
-            long retention = getRetention(feed, feed.getFrequency().getTimeUnit());
-
-            for (org.apache.falcon.entity.v0.feed.Cluster cluster : feed.getClusters().getClusters()) {
-                delete(cluster.getName(), feed, retention);
-            }
-        }
-    }
-
-    @Override
-    protected String getRelativeLogPath() {
-        return "job-*/*/*";
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/cleanup/ProcessCleanupHandler.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/cleanup/ProcessCleanupHandler.java b/common/src/main/java/org/apache/falcon/cleanup/ProcessCleanupHandler.java
deleted file mode 100644
index 00281f9..0000000
--- a/common/src/main/java/org/apache/falcon/cleanup/ProcessCleanupHandler.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.cleanup;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.process.Process;
-
-import java.util.Collection;
-
-/**
- * Handler to clean up files left behind by Falcon relating to processes.
- */
-public class ProcessCleanupHandler extends AbstractCleanupHandler {
-
-    @Override
-    public void cleanup() throws FalconException {
-        Collection<String> processes = STORE.getEntities(EntityType.PROCESS);
-        for (String processName : processes) {
-            Process process = STORE.get(EntityType.PROCESS, processName);
-            long retention = getRetention(process, process.getFrequency().getTimeUnit());
-
-            for (org.apache.falcon.entity.v0.process.Cluster cluster : process.getClusters().getClusters()) {
-                delete(cluster.getName(), process, retention);
-            }
-        }
-    }
-
-    @Override
-    protected String getRelativeLogPath() {
-        return "job-*/*";
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/CatalogStorage.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/CatalogStorage.java b/common/src/main/java/org/apache/falcon/entity/CatalogStorage.java
deleted file mode 100644
index c5860c9..0000000
--- a/common/src/main/java/org/apache/falcon/entity/CatalogStorage.java
+++ /dev/null
@@ -1,592 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.Pair;
-import org.apache.falcon.catalog.AbstractCatalogService;
-import org.apache.falcon.catalog.CatalogPartition;
-import org.apache.falcon.catalog.CatalogServiceFactory;
-import org.apache.falcon.catalog.HiveCatalogService;
-import org.apache.falcon.entity.common.FeedDataPath;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.feed.CatalogTable;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.retention.EvictedInstanceSerDe;
-import org.apache.falcon.retention.EvictionHelper;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.jsp.el.ELException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-
-/**
- * A catalog registry implementation of a feed storage.
- */
-public class CatalogStorage extends Configured implements Storage {
-
-    private static final Logger LOG = LoggerFactory.getLogger(CatalogStorage.class);
-
-    // constants to be used while preparing HCatalog partition filter query
-    private static final String FILTER_ST_BRACKET = "(";
-    private static final String FILTER_END_BRACKET = ")";
-    private static final String FILTER_QUOTE = "'";
-    private static final String FILTER_AND = " and ";
-    private static final String FILTER_OR = " or ";
-    private static final String FILTER_LESS_THAN = " < ";
-    private static final String FILTER_EQUALS = " = ";
-
-    private final StringBuffer instancePaths = new StringBuffer();
-    private final StringBuilder instanceDates = new StringBuilder();
-
-    public static final String PARTITION_SEPARATOR = ";";
-    public static final String PARTITION_KEYVAL_SEPARATOR = "=";
-    public static final String INPUT_PATH_SEPARATOR = ":";
-    public static final String OUTPUT_PATH_SEPARATOR = "/";
-    public static final String PARTITION_VALUE_QUOTE = "'";
-
-    public static final String CATALOG_URL = "${hcatNode}";
-
-    private final String catalogUrl;
-    private String database;
-    private String table;
-    private Map<String, String> partitions;
-
-    protected CatalogStorage(Feed feed) throws URISyntaxException {
-        this(CATALOG_URL, feed.getTable());
-    }
-
-    public CatalogStorage(Cluster cluster, CatalogTable table) throws URISyntaxException {
-        this(ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY).getEndpoint(), table);
-    }
-
-    protected CatalogStorage(String catalogUrl, CatalogTable table) throws URISyntaxException {
-        this(catalogUrl, table.getUri());
-    }
-
-    protected CatalogStorage(String catalogUrl, String tableUri) throws URISyntaxException {
-        if (catalogUrl == null || catalogUrl.length() == 0) {
-            throw new IllegalArgumentException("Catalog Registry URL cannot be null or empty");
-        }
-
-        this.catalogUrl = catalogUrl;
-
-        parseFeedUri(tableUri);
-    }
-
-    /**
-     * Validate URI to conform to catalog:$database:$table#$partitions.
-     * scheme=catalog:database=$database:table=$table#$partitions
-     * partitions=key=value;key=value
-     *
-     * @param catalogTableUri table URI to parse and validate
-     * @throws URISyntaxException
-     */
-    private void parseFeedUri(String catalogTableUri) throws URISyntaxException {
-
-        final String processed = catalogTableUri.replaceAll(DOLLAR_EXPR_START_REGEX, DOLLAR_EXPR_START_NORMALIZED)
-                                                .replaceAll("}", EXPR_CLOSE_NORMALIZED);
-        URI tableUri = new URI(processed);
-
-        if (!"catalog".equals(tableUri.getScheme())) {
-            throw new URISyntaxException(tableUri.toString(), "catalog scheme is missing");
-        }
-
-        final String schemeSpecificPart = tableUri.getSchemeSpecificPart();
-        if (schemeSpecificPart == null) {
-            throw new URISyntaxException(tableUri.toString(), "Database and Table are missing");
-        }
-
-        String[] paths = schemeSpecificPart.split(INPUT_PATH_SEPARATOR);
-
-        if (paths.length != 2) {
-            throw new URISyntaxException(tableUri.toString(), "URI path is not in expected format: database:table");
-        }
-
-        database = paths[0];
-        table = paths[1];
-
-        if (database == null || database.length() == 0) {
-            throw new URISyntaxException(tableUri.toString(), "DB name is missing");
-        }
-        if (table == null || table.length() == 0) {
-            throw new URISyntaxException(tableUri.toString(), "Table name is missing");
-        }
-
-        String partRaw = tableUri.getFragment();
-        if (partRaw == null || partRaw.length() == 0) {
-            throw new URISyntaxException(tableUri.toString(), "Partition details are missing");
-        }
-
-        final String rawPartition = partRaw.replaceAll(DOLLAR_EXPR_START_NORMALIZED, DOLLAR_EXPR_START_REGEX)
-                                           .replaceAll(EXPR_CLOSE_NORMALIZED, EXPR_CLOSE_REGEX);
-        partitions = new LinkedHashMap<String, String>(); // preserve insertion order
-        String[] parts = rawPartition.split(PARTITION_SEPARATOR);
-        for (String part : parts) {
-            if (part == null || part.length() == 0) {
-                continue;
-            }
-
-            String[] keyVal = part.split(PARTITION_KEYVAL_SEPARATOR);
-            if (keyVal.length != 2) {
-                throw new URISyntaxException(tableUri.toString(),
-                        "Partition key value pair is not specified properly in (" + part + ")");
-            }
-
-            partitions.put(keyVal[0], keyVal[1]);
-        }
-    }
-
-    /**
-     * Create an instance from the URI Template that was generated using
-     * the getUriTemplate() method.
-     *
-     * @param uriTemplate the uri template from org.apache.falcon.entity.CatalogStorage#getUriTemplate
-     * @throws URISyntaxException
-     */
-    protected CatalogStorage(String uriTemplate) throws URISyntaxException {
-        if (uriTemplate == null || uriTemplate.length() == 0) {
-            throw new IllegalArgumentException("URI template cannot be null or empty");
-        }
-
-        final String processed = uriTemplate.replaceAll(DOLLAR_EXPR_START_REGEX, DOLLAR_EXPR_START_NORMALIZED)
-                                            .replaceAll("}", EXPR_CLOSE_NORMALIZED);
-        URI uri = new URI(processed);
-
-        this.catalogUrl = uri.getScheme() + "://" + uri.getAuthority();
-
-        parseUriTemplate(uri);
-    }
-
-    protected CatalogStorage(String uriTemplate, Configuration conf) throws URISyntaxException {
-        this(uriTemplate);
-        setConf(conf);
-    }
-
-    private void parseUriTemplate(URI uriTemplate) throws URISyntaxException {
-        String path = uriTemplate.getPath();
-        String[] paths = path.split(OUTPUT_PATH_SEPARATOR);
-        if (paths.length != 4) {
-            throw new URISyntaxException(uriTemplate.toString(),
-                    "URI path is not in expected format: database:table");
-        }
-
-        database = paths[1];
-        table = paths[2];
-        String partRaw = paths[3];
-
-        if (database == null || database.length() == 0) {
-            throw new URISyntaxException(uriTemplate.toString(), "DB name is missing");
-        }
-        if (table == null || table.length() == 0) {
-            throw new URISyntaxException(uriTemplate.toString(), "Table name is missing");
-        }
-        if (partRaw == null || partRaw.length() == 0) {
-            throw new URISyntaxException(uriTemplate.toString(), "Partition details are missing");
-        }
-
-        String rawPartition = partRaw.replaceAll(DOLLAR_EXPR_START_NORMALIZED, DOLLAR_EXPR_START_REGEX)
-                .replaceAll(EXPR_CLOSE_NORMALIZED, EXPR_CLOSE_REGEX);
-        partitions = new LinkedHashMap<String, String>();
-        String[] parts = rawPartition.split(PARTITION_SEPARATOR);
-        for (String part : parts) {
-            if (part == null || part.length() == 0) {
-                continue;
-            }
-
-            String[] keyVal = part.split(PARTITION_KEYVAL_SEPARATOR);
-            if (keyVal.length != 2) {
-                throw new URISyntaxException(uriTemplate.toString(),
-                        "Partition key value pair is not specified properly in (" + part + ")");
-            }
-
-            partitions.put(keyVal[0], keyVal[1]);
-        }
-    }
-
-    public String getCatalogUrl() {
-        return catalogUrl;
-    }
-
-    public String getDatabase() {
-        return database;
-    }
-
-    public String getTable() {
-        return table;
-    }
-
-    public Map<String, String> getPartitions() {
-        return partitions;
-    }
-
-    /**
-     * @param key partition key
-     * @return partition value
-     */
-    public String getPartitionValue(String key) {
-        return partitions.get(key);
-    }
-
-    /**
-     * @param key partition key
-     * @return if partitions map includes the key or not
-     */
-    public boolean hasPartition(String key) {
-        return partitions.containsKey(key);
-    }
-
-    public List<String> getDatedPartitionKeys() {
-        List<String> keys = new ArrayList<String>();
-
-        for (Map.Entry<String, String> entry : getPartitions().entrySet()) {
-
-            Matcher matcher = FeedDataPath.PATTERN.matcher(entry.getValue());
-            if (matcher.find()) {
-                keys.add(entry.getKey());
-            }
-        }
-
-        return keys;
-    }
-
-    /**
-     * Convert the partition map to filter string.
-     * Each key value pair is separated by ';'.
-     *
-     * @return filter string
-     */
-    public String toPartitionFilter() {
-        StringBuilder filter = new StringBuilder();
-        filter.append("(");
-        for (Map.Entry<String, String> entry : partitions.entrySet()) {
-            if (filter.length() > 1) {
-                filter.append(PARTITION_SEPARATOR);
-            }
-            filter.append(entry.getKey());
-            filter.append(PARTITION_KEYVAL_SEPARATOR);
-            filter.append(PARTITION_VALUE_QUOTE);
-            filter.append(entry.getValue());
-            filter.append(PARTITION_VALUE_QUOTE);
-        }
-        filter.append(")");
-        return filter.toString();
-    }
-
-    /**
-     * Convert the partition map to path string.
-     * Each key value pair is separated by '/'.
-     *
-     * @return path string
-     */
-    public String toPartitionAsPath() {
-        StringBuilder partitionFilter = new StringBuilder();
-
-        for (Map.Entry<String, String> entry : getPartitions().entrySet()) {
-            partitionFilter.append(entry.getKey())
-                    .append(PARTITION_KEYVAL_SEPARATOR)
-                    .append(entry.getValue())
-                    .append(OUTPUT_PATH_SEPARATOR);
-        }
-
-        partitionFilter.setLength(partitionFilter.length() - 1);
-        return partitionFilter.toString();
-    }
-
-    @Override
-    public TYPE getType() {
-        return TYPE.TABLE;
-    }
-
-    /**
-     * LocationType does NOT matter here.
-     */
-    @Override
-    public String getUriTemplate() {
-        return getUriTemplate(LocationType.DATA);
-    }
-
-    /**
-     * LocationType does NOT matter here.
-     */
-    @Override
-    public String getUriTemplate(LocationType locationType) {
-        StringBuilder uriTemplate = new StringBuilder();
-        uriTemplate.append(catalogUrl);
-        uriTemplate.append(OUTPUT_PATH_SEPARATOR);
-        uriTemplate.append(database);
-        uriTemplate.append(OUTPUT_PATH_SEPARATOR);
-        uriTemplate.append(table);
-        uriTemplate.append(OUTPUT_PATH_SEPARATOR);
-        for (Map.Entry<String, String> entry : partitions.entrySet()) {
-            uriTemplate.append(entry.getKey());
-            uriTemplate.append(PARTITION_KEYVAL_SEPARATOR);
-            uriTemplate.append(entry.getValue());
-            uriTemplate.append(PARTITION_SEPARATOR);
-        }
-        uriTemplate.setLength(uriTemplate.length() - 1);
-
-        return uriTemplate.toString();
-    }
-
-    @Override
-    public boolean isIdentical(Storage toCompareAgainst) throws FalconException {
-        if (!(toCompareAgainst instanceof CatalogStorage)) {
-            return false;
-        }
-
-        CatalogStorage catalogStorage = (CatalogStorage) toCompareAgainst;
-
-        return !(getCatalogUrl() != null && !getCatalogUrl().equals(catalogStorage.getCatalogUrl()))
-                && getDatabase().equals(catalogStorage.getDatabase())
-                && getTable().equals(catalogStorage.getTable())
-                && getPartitions().equals(catalogStorage.getPartitions());
-    }
-
-    @Override
-    public void validateACL(AccessControlList acl) throws FalconException {
-        // This is not supported in Hive today as authorization is not enforced on table and
-        // partition listing
-    }
-
-    @Override
-    public List<FeedInstanceStatus> getListing(Feed feed, String clusterName, LocationType locationType,
-                                               Date start, Date end) throws FalconException {
-        try {
-            List<FeedInstanceStatus> instances = new ArrayList<FeedInstanceStatus>();
-            Date feedStart = FeedHelper.getFeedValidityStart(feed, clusterName);
-            Date alignedDate = EntityUtil.getNextStartTime(feedStart, feed.getFrequency(),
-                    feed.getTimezone(), start);
-
-            while (!end.before(alignedDate)) {
-                List<String> partitionValues = getCatalogPartitionValues(alignedDate);
-                try {
-                    CatalogPartition partition = CatalogServiceFactory.getCatalogService().getPartition(
-                            getConf(), getCatalogUrl(), getDatabase(), getTable(), partitionValues);
-                    instances.add(getFeedInstanceFromCatalogPartition(partition));
-                } catch (FalconException e) {
-                    if (e.getMessage().startsWith(HiveCatalogService.PARTITION_DOES_NOT_EXIST)) {
-                        // Partition missing
-                        FeedInstanceStatus instanceStatus = new FeedInstanceStatus(null);
-                        instanceStatus.setInstance(partitionValues.toString());
-                        instances.add(instanceStatus);
-                    } else {
-                        throw e;
-                    }
-                }
-                alignedDate = FeedHelper.getNextFeedInstanceDate(alignedDate, feed);
-            }
-            return instances;
-        } catch (Exception e) {
-            LOG.error("Unable to retrieve listing for {}:{} -- {}", locationType, catalogUrl, e.getMessage());
-            throw new FalconException("Unable to retrieve listing for (URI " + catalogUrl + ")", e);
-        }
-    }
-
-    private List<String> getCatalogPartitionValues(Date alignedDate) throws FalconException {
-        List<String> partitionValues  = new ArrayList<String>();
-        for (Map.Entry<String, String> entry : getPartitions().entrySet()) {
-            if (FeedDataPath.PATTERN.matcher(entry.getValue()).find()) {
-                ExpressionHelper.setReferenceDate(alignedDate);
-                ExpressionHelper expressionHelper = ExpressionHelper.get();
-                String instanceValue = expressionHelper.evaluateFullExpression(entry.getValue(), String.class);
-                partitionValues.add(instanceValue);
-            } else {
-                partitionValues.add(entry.getValue());
-            }
-        }
-        return partitionValues;
-    }
-
-    private FeedInstanceStatus getFeedInstanceFromCatalogPartition(CatalogPartition partition) {
-        FeedInstanceStatus feedInstanceStatus = new FeedInstanceStatus(partition.getLocation());
-        feedInstanceStatus.setCreationTime(partition.getCreateTime());
-        feedInstanceStatus.setInstance(partition.getValues().toString());
-        FeedInstanceStatus.AvailabilityStatus availabilityStatus = FeedInstanceStatus.AvailabilityStatus.MISSING;
-        long size = partition.getSize();
-        if (size == 0) {
-            availabilityStatus = FeedInstanceStatus.AvailabilityStatus.EMPTY;
-        } else if (size > 0) {
-            availabilityStatus = FeedInstanceStatus.AvailabilityStatus.AVAILABLE;
-        }
-        feedInstanceStatus.setSize(size);
-        feedInstanceStatus.setStatus(availabilityStatus);
-        return feedInstanceStatus;
-    }
-
-    @Override
-    public FeedInstanceStatus.AvailabilityStatus getInstanceAvailabilityStatus(Feed feed, String clusterName,
-                                         LocationType locationType, Date instanceTime) throws FalconException {
-        List<FeedInstanceStatus> result = getListing(feed, clusterName, locationType, instanceTime, instanceTime);
-        if (result.isEmpty()) {
-            return FeedInstanceStatus.AvailabilityStatus.MISSING;
-        } else {
-            return result.get(0).getStatus();
-        }
-    }
-
-    @Override
-    public StringBuilder evict(String retentionLimit, String timeZone, Path logFilePath) throws FalconException {
-        LOG.info("Applying retention on {}, Limit: {}, timezone: {}",
-                getTable(), retentionLimit, timeZone);
-
-        List<CatalogPartition> toBeDeleted;
-        try {
-            // get sorted date partition keys and values
-            toBeDeleted = discoverPartitionsToDelete(retentionLimit, timeZone);
-        } catch (ELException e) {
-            throw new FalconException("Couldn't find partitions to be deleted", e);
-
-        }
-
-        if (toBeDeleted.isEmpty()) {
-            LOG.info("No partitions to delete.");
-        } else {
-            final boolean isTableExternal = CatalogServiceFactory.getCatalogService().isTableExternal(
-                getConf(), getCatalogUrl(), getDatabase(), getTable());
-            try {
-                dropPartitions(toBeDeleted, isTableExternal);
-            } catch (IOException e) {
-                throw new FalconException("Couldn't drop partitions", e);
-            }
-        }
-
-        try {
-            EvictedInstanceSerDe.serializeEvictedInstancePaths(
-                    HadoopClientFactory.get().createProxiedFileSystem(logFilePath.toUri(), new Configuration()),
-                    logFilePath, instancePaths);
-        } catch (IOException e) {
-            throw new FalconException("Couldn't record dropped partitions", e);
-        }
-        return instanceDates;
-    }
-
-    private List<CatalogPartition> discoverPartitionsToDelete(String retentionLimit, String timezone)
-        throws FalconException, ELException {
-        Pair<Date, Date> range = EvictionHelper.getDateRange(retentionLimit);
-        ExpressionHelper.setReferenceDate(range.first);
-        Map<String, String> partitionsToDelete = new LinkedHashMap<String, String>();
-        ExpressionHelper expressionHelper = ExpressionHelper.get();
-        for (Map.Entry<String, String> entry : getPartitions().entrySet()) {
-            if (FeedDataPath.PATTERN.matcher(entry.getValue()).find()) {
-                partitionsToDelete.put(entry.getKey(),
-                        expressionHelper.evaluateFullExpression(entry.getValue(), String.class));
-            }
-        }
-        final String filter = createFilter(partitionsToDelete);
-        return CatalogServiceFactory.getCatalogService().listPartitionsByFilter(
-            getConf(), getCatalogUrl(), getDatabase(), getTable(), filter);
-    }
-
-    /**
-     * Creates a Hive partition filter from the input partition map.
-     * @param partitionsMap - ordered map of partition keys and values
-     * @return partition filter
-     * @throws ELException
-     */
-    private String createFilter(Map<String, String> partitionsMap) throws ELException {
-
-        /* Construct filter query string. As an example, suppose the dated partition keys
-         * are: [year, month, day, hour] and dated partition values are [2014, 02, 24, 10].
-         * Then the filter query generated is of the format:
-         * "(year < '2014') or (year = '2014' and month < '02') or
-         * (year = '2014' and month = '02' and day < '24') or
-         * (year = '2014' and month = '02' and day = '24' and hour < '10')"
-         */
-        StringBuilder filterBuffer = new StringBuilder();
-        List<String> keys = new ArrayList<String>(partitionsMap.keySet());
-        for (int curr = 0; curr < partitionsMap.size(); curr++) {
-            if (curr > 0) {
-                filterBuffer.append(FILTER_OR);
-            }
-            filterBuffer.append(FILTER_ST_BRACKET);
-            for (int prev = 0; prev < curr; prev++) {
-                String key = keys.get(prev);
-                filterBuffer.append(key)
-                        .append(FILTER_EQUALS)
-                        .append(FILTER_QUOTE)
-                        .append(partitionsMap.get(key))
-                        .append(FILTER_QUOTE)
-                        .append(FILTER_AND);
-            }
-            String key = keys.get(curr);
-            filterBuffer.append(key)
-                    .append(FILTER_LESS_THAN)
-                    .append(FILTER_QUOTE)
-                    .append(partitionsMap.get(key))
-                    .append(FILTER_QUOTE)
-                    .append(FILTER_END_BRACKET);
-        }
-
-        return filterBuffer.toString();
-    }
-
-    private void dropPartitions(List<CatalogPartition> partitionsToDelete, boolean isTableExternal)
-        throws FalconException, IOException {
-        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
-        for (CatalogPartition partition : partitionsToDelete) {
-            boolean deleted = catalogService.dropPartition(getConf(), getCatalogUrl(), getDatabase(), getTable(),
-                    partition.getValues(), true);
-
-            if (!deleted) {
-                return;
-            }
-
-            if (isTableExternal) { // nuke the dirs if an external table
-                final Path path = new Path(partition.getLocation());
-                if (!HadoopClientFactory.get().createProxiedFileSystem(path.toUri()).delete(path, true)) {
-                    throw new FalconException("Failed to delete location " + path + " for partition "
-                            + partition.getValues());
-                }
-            }
-
-            // replace ',' with ';' since message producer splits instancePaths string by ','
-            String partitionInfo = partition.getValues().toString().replace(",", ";");
-            LOG.info("Deleted partition: " + partitionInfo);
-            instanceDates.append(partitionInfo).append(',');
-            instancePaths.append(partition.getLocation()).append(EvictedInstanceSerDe.INSTANCEPATH_SEPARATOR);
-        }
-    }
-
-    @Override
-    public String toString() {
-        return "CatalogStorage{"
-                + "catalogUrl='" + catalogUrl + '\''
-                + ", database='" + database + '\''
-                + ", table='" + table + '\''
-                + ", partitions=" + partitions
-                + '}';
-    }
-}
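
For reference (not part of the deleted sources), a minimal standalone sketch that reproduces the dated-partition filter format documented in the createFilter() comment above, using the same [year, month, day, hour] example. The class and method names here are hypothetical.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Illustrative reimplementation of the filter layout from CatalogStorage#createFilter(); not Falcon code. */
public final class PartitionFilterExample {

    static String createFilter(Map<String, String> partitions) {
        StringBuilder filter = new StringBuilder();
        List<String> keys = new ArrayList<>(partitions.keySet());
        for (int curr = 0; curr < keys.size(); curr++) {
            if (curr > 0) {
                filter.append(" or ");
            }
            filter.append('(');
            // equality clauses for every key before the current one
            for (int prev = 0; prev < curr; prev++) {
                String key = keys.get(prev);
                filter.append(key).append(" = '").append(partitions.get(key)).append("' and ");
            }
            // strictly-less-than clause for the current key
            String key = keys.get(curr);
            filter.append(key).append(" < '").append(partitions.get(key)).append("')");
        }
        return filter.toString();
    }

    public static void main(String[] args) {
        Map<String, String> dated = new LinkedHashMap<>();
        dated.put("year", "2014");
        dated.put("month", "02");
        dated.put("day", "24");
        dated.put("hour", "10");
        System.out.println(createFilter(dated));
        // (year < '2014') or (year = '2014' and month < '02') or
        // (year = '2014' and month = '02' and day < '24') or
        // (year = '2014' and month = '02' and day = '24' and hour < '10')
    }
}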


[32/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/FeedHelperTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/FeedHelperTest.java b/common/src/test/java/org/apache/falcon/entity/FeedHelperTest.java
deleted file mode 100644
index 450b251..0000000
--- a/common/src/test/java/org/apache/falcon/entity/FeedHelperTest.java
+++ /dev/null
@@ -1,1080 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.parser.EntityParserFactory;
-import org.apache.falcon.entity.parser.FeedEntityParser;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.Properties;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.entity.v0.feed.Argument;
-import org.apache.falcon.entity.v0.feed.Arguments;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Clusters;
-import org.apache.falcon.entity.v0.feed.Extract;
-import org.apache.falcon.entity.v0.feed.ExtractMethod;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.FieldIncludeExclude;
-import org.apache.falcon.entity.v0.feed.FieldsType;
-import org.apache.falcon.entity.v0.feed.Import;
-import org.apache.falcon.entity.v0.feed.Lifecycle;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.feed.MergeType;
-import org.apache.falcon.entity.v0.feed.RetentionStage;
-import org.apache.falcon.entity.v0.feed.Datasource;
-import org.apache.falcon.entity.v0.feed.Validity;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.Inputs;
-import org.apache.falcon.entity.v0.process.Output;
-import org.apache.falcon.entity.v0.process.Outputs;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.resource.SchedulableEntityInstance;
-import org.apache.falcon.service.LifecyclePolicyMap;
-import org.apache.falcon.util.DateUtil;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.TimeZone;
-
-/**
- * Test for feed helper methods.
- */
-public class FeedHelperTest extends AbstractTestBase {
-    private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
-    private ConfigurationStore store;
-
-    @BeforeClass
-    public void init() throws Exception {
-        initConfigStore();
-        LifecyclePolicyMap.get().init();
-    }
-
-    @BeforeMethod
-    public void setUp() throws Exception {
-        cleanupStore();
-        store = getStore();
-    }
-
-    @Test
-    public void testPartitionExpression() {
-        Assert.assertEquals(FeedHelper.normalizePartitionExpression(" /a// ", "  /b// "), "a/b");
-        Assert.assertEquals(FeedHelper.normalizePartitionExpression(null, "  /b// "), "b");
-        Assert.assertEquals(FeedHelper.normalizePartitionExpression(null, null), "");
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testInstanceBeforeStart() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        FeedHelper.getProducerInstance(feed, getDate("2011-02-27 10:00 UTC"), cluster);
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testInstanceEqualsEnd() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        FeedHelper.getProducerInstance(feed, getDate("2016-02-28 10:00 UTC"), cluster);
-    }
-
-    @Test(expectedExceptions = IllegalArgumentException.class)
-    public void testInstanceOutOfSync() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        FeedHelper.getProducerInstance(feed, getDate("2016-02-28 09:04 UTC"), cluster);
-    }
-
-    @Test
-    public void testInvalidProducerInstance() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        Assert.assertNull(FeedHelper.getProducerInstance(feed, getDate("2012-02-28 10:40 UTC"), cluster));
-    }
-
-    @Test
-    public void testGetProducerOutOfValidity() throws FalconException, ParseException {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        Assert.assertEquals(FeedHelper.getProducerProcess(feed).getName(), process.getName());
-        SchedulableEntityInstance result = FeedHelper.getProducerInstance(feed, getDate("2012-02-28 10:45 UTC"),
-                cluster);
-        Assert.assertNull(result);
-    }
-
-    @Test
-    public void testGetConsumersOutOfValidity() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(0, -20)");
-        inFeed.setEnd("now(0, 0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed, getDate("2016-02-28 09:00 UTC"),
-                cluster);
-        Assert.assertTrue(result.isEmpty());
-    }
-
-    @Test
-    public void testGetFeedValidityStartAndNextInstance() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Date date = FeedHelper.getFeedValidityStart(feed, cluster.getName());
-        Assert.assertEquals(DateUtil.getDateFormatFromTime(date.getTime()), "2011-02-28T10:00Z");
-        Date nextDate = FeedHelper.getNextFeedInstanceDate(date, feed);
-        Assert.assertEquals(DateUtil.getDateFormatFromTime(nextDate.getTime()), "2011-02-28T10:05Z");
-    }
-
-
-    @Test
-    public void testGetConsumersFirstInstance() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2012-02-28 10:47 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(0, -20)");
-        inFeed.setEnd("now(0, 0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed, getDate("2012-02-28 10:15 UTC"),
-                cluster);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        SchedulableEntityInstance consumer = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2012-02-28 10:37 UTC"), EntityType.PROCESS);
-        consumer.setTags(SchedulableEntityInstance.INPUT);
-        expected.add(consumer);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumersLastInstance() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:20 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(0, -20)");
-        inFeed.setEnd("now(0, 0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed, getDate("2012-02-28 10:15 UTC"),
-                cluster);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        String[] consumers = { "2012-02-28 10:20 UTC", "2012-02-28 10:30 UTC", };
-        for (String d : consumers) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                    getDate(d), EntityType.PROCESS);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expected.add(i);
-        }
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetPolicies() throws Exception {
-        FeedEntityParser parser = (FeedEntityParser) EntityParserFactory
-                .getParser(EntityType.FEED);
-        Feed feed = parser.parse(this.getClass().getResourceAsStream(FEED3_XML));
-        List<String> policies = FeedHelper.getPolicies(feed, "testCluster");
-        Assert.assertEquals(policies.size(), 1);
-        Assert.assertEquals(policies.get(0), "AgeBasedDelete");
-    }
-
-    @Test
-    public void testFeedWithNoDependencies() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed, getDate("2016-02-28 09:00 UTC"),
-                cluster);
-        Assert.assertTrue(result.isEmpty());
-        SchedulableEntityInstance res = FeedHelper.getProducerInstance(feed, getDate("2012-02-28 10:45 UTC"),
-                cluster);
-        Assert.assertNull(res);
-    }
-
-    @Test
-    public void testEvaluateExpression() throws Exception {
-        Cluster cluster = new Cluster();
-        cluster.setName("name");
-        cluster.setColo("colo");
-        cluster.setProperties(new Properties());
-        Property prop = new Property();
-        prop.setName("pname");
-        prop.setValue("pvalue");
-        cluster.getProperties().getProperties().add(prop);
-
-        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster, "${cluster.colo}/*/US"), "colo/*/US");
-        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster, "${cluster.name}/*/${cluster.pname}"),
-                "name/*/pvalue");
-        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster, "IN"), "IN");
-    }
-
-    @DataProvider(name = "fsPathsforDate")
-    public Object[][] createPathsForGetDate() {
-        final TimeZone utc = TimeZone.getTimeZone("UTC");
-        final TimeZone pacificTime = TimeZone.getTimeZone("America/Los_Angeles");
-        final TimeZone ist = TimeZone.getTimeZone("IST");
-
-        return new Object[][] {
-            {"/data/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}", "/data/2015/01/01/00/30", utc, "2015-01-01T00:30Z"},
-            {"/data/${YEAR}-${MONTH}-${DAY}-${HOUR}-${MINUTE}", "/data/2015-01-01-01-00", utc, "2015-01-01T01:00Z"},
-            {"/data/${YEAR}/${MONTH}/${DAY}", "/data/2015/01/01", utc, "2015-01-01T00:00Z"},
-            {"/data/${YEAR}/${MONTH}/${DAY}/data", "/data/2015/01/01/data", utc, "2015-01-01T00:00Z"},
-            {"/data/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}", "/data/2015-01-01/00/30", utc, null},
-            {"/data/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}/data", "/data/2015-01-01/00/30", utc, null},
-            {"/d/${YEAR}/${MONTH}/${DAY}/${HOUR}/data", "/d/2015/05/25/00/data/{p1}/p2", utc, "2015-05-25T00:00Z"},
-            {"/data/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}/data", "/data/2015/05/25/00/00/{p1}/p2", utc, null},
-            {"/d/${YEAR}/${MONTH}/M", "/d/2015/11/M", utc, "2015-11-01T00:00Z"},
-            {"/d/${YEAR}/${MONTH}/${DAY}/M", "/d/2015/11/02/M", utc, "2015-11-02T00:00Z"},
-            {"/d/${YEAR}/${MONTH}/${DAY}/${HOUR}/M", "/d/2015/11/01/04/M", utc, "2015-11-01T04:00Z"},
-            {"/d/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}/M", "/d/2015/11/01/04/15/M", utc, "2015-11-01T04:15Z"},
-            {"/d/${YEAR}/${MONTH}/M", "/d/2015/11/M", pacificTime, "2015-11-01T07:00Z"},
-            {"/d/${YEAR}/${MONTH}/${DAY}/M", "/d/2015/11/02/M", pacificTime, "2015-11-02T08:00Z"},
-            {"/d/${YEAR}/${MONTH}/${DAY}/${HOUR}/M", "/d/2015/11/01/04/M", pacificTime, "2015-11-01T12:00Z"},
-            {"/d/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}/M", "/d/2015/11/01/04/15/M", ist, "2015-10-31T22:45Z"},
-        };
-    }
-
-    @Test(dataProvider = "fsPathsforDate")
-    public void testGetDateFromPath(String template, String path, TimeZone tz, String expectedDate) throws Exception {
-        Date date = FeedHelper.getDate(template, new Path(path), tz);
-        Assert.assertEquals(SchemaHelper.formatDateUTC(date), expectedDate);
-    }
-
-    @Test
-    public void testGetLocations() {
-        Cluster cluster = new Cluster();
-        cluster.setName("name");
-        Feed feed = new Feed();
-        Location location1 = new Location();
-        location1.setType(LocationType.META);
-        Locations locations = new Locations();
-        locations.getLocations().add(location1);
-
-        Location location2 = new Location();
-        location2.setType(LocationType.DATA);
-        locations.getLocations().add(location2);
-
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        feedCluster.setName("name");
-
-        feed.setLocations(locations);
-        Clusters clusters = new Clusters();
-        feed.setClusters(clusters);
-        feed.getClusters().getClusters().add(feedCluster);
-
-        Assert.assertEquals(FeedHelper.getLocations(feedCluster, feed),
-                locations.getLocations());
-        Assert.assertEquals(FeedHelper.getLocation(feed, cluster, LocationType.DATA), location2);
-    }
-
-    @Test
-    public void testGetProducerProcessWithOffset() throws FalconException, ParseException {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Assert.assertNull(FeedHelper.getProducerProcess(feed));
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 10:37 UTC", "2016-02-28 10:37 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        Assert.assertEquals(FeedHelper.getProducerProcess(feed).getName(), process.getName());
-        SchedulableEntityInstance result = FeedHelper.getProducerInstance(feed, getDate("2013-02-28 10:35 UTC"),
-                cluster);
-        SchedulableEntityInstance expected = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2013-02-28 10:37 UTC"), EntityType.PROCESS);
-        expected.setTags(SchedulableEntityInstance.OUTPUT);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetProducerProcessForNow() throws FalconException, ParseException {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "days(1)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Assert.assertNull(FeedHelper.getProducerProcess(feed));
-
-        // create its producer process, submit it, and test its ProducerProcess
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Assert.assertEquals(FeedHelper.getProducerProcess(feed).getName(), process.getName());
-        SchedulableEntityInstance result = FeedHelper.getProducerInstance(feed, getDate("2013-02-28 10:00 UTC"),
-                cluster);
-        SchedulableEntityInstance expected = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2013-02-28 10:00 UTC"), EntityType.PROCESS);
-        expected.setTags(SchedulableEntityInstance.OUTPUT);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetProducerWithNowNegativeOffset() throws FalconException, ParseException {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "days(1)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Assert.assertNull(FeedHelper.getProducerProcess(feed));
-
-        // create its producer process, submit it, and test its ProducerProcess
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(-4,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Assert.assertEquals(FeedHelper.getProducerProcess(feed).getName(), process.getName());
-        SchedulableEntityInstance result = FeedHelper.getProducerInstance(feed, getDate("2013-02-27 10:00 UTC"),
-                cluster);
-        SchedulableEntityInstance expected = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2013-02-28 10:00 UTC"), EntityType.PROCESS);
-        expected.setTags(SchedulableEntityInstance.OUTPUT);
-        Assert.assertEquals(result, expected);
-    }
-
-
-    @Test
-    public void testGetProducerWithNowPositiveOffset() throws FalconException, ParseException {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "days(1)", "2011-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Assert.assertNull(FeedHelper.getProducerProcess(feed));
-
-        // create its producer process, submit it, and test its ProducerProcess
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("now(4,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Assert.assertEquals(FeedHelper.getProducerProcess(feed).getName(), process.getName());
-        SchedulableEntityInstance result = FeedHelper.getProducerInstance(feed, getDate("2013-02-28 10:00 UTC"),
-                cluster);
-        SchedulableEntityInstance expected = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2013-02-28 10:00 UTC"), EntityType.PROCESS);
-        expected.setTags(SchedulableEntityInstance.OUTPUT);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetProducerProcessInstance() throws FalconException, ParseException {
-        //create a feed, submit it, test that ProducerProcess is null
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "days(1)", "2011-02-28 00:00 UTC", "2016-02-28 10:00 UTC");
-
-        // create its producer process, submit it, and test its ProducerProcess
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Outputs outputs = new Outputs();
-        Output outFeed = new Output();
-        outFeed.setName("outputFeed");
-        outFeed.setFeed(feed.getName());
-        outFeed.setInstance("today(0,0)");
-        outputs.getOutputs().add(outFeed);
-        process.setOutputs(outputs);
-        store.publish(EntityType.PROCESS, process);
-        Assert.assertEquals(FeedHelper.getProducerProcess(feed).getName(), process.getName());
-        SchedulableEntityInstance result = FeedHelper.getProducerInstance(feed, getDate("2013-02-28 00:00 UTC"),
-                cluster);
-        SchedulableEntityInstance expected = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2013-02-28 10:00 UTC"), EntityType.PROCESS);
-        expected.setTags(SchedulableEntityInstance.OUTPUT);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumerProcesses() throws FalconException, ParseException {
-        //create a feed, submit it, and test that ConsumerProcesses is an empty list
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("outputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("today(0,0)");
-        inFeed.setEnd("today(0,0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<Process> result = FeedHelper.getConsumerProcesses(feed);
-        Assert.assertEquals(result.size(), 1);
-        Assert.assertTrue(result.contains(process));
-    }
-
-    @Test
-    public void testGetConsumerProcessInstances() throws Exception {
-        //create a feed, submit it, and test that ConsumerProcesses is an empty list
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)", "2012-02-28 00:00 UTC", "2016-02-28 00:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "days(1)", "2012-02-28 10:00 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(-4, 30)");
-        inFeed.setEnd("now(4, 30)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-02-28 09:00 UTC"), cluster);
-        Assert.assertEquals(result.size(), 1);
-
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        SchedulableEntityInstance ins = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2012-02-28 10:00 UTC"), EntityType.PROCESS);
-        ins.setTags(SchedulableEntityInstance.INPUT);
-        expected.add(ins);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumerProcessInstancesWithNonUnitFrequency() throws Exception {
-        //create a feed, submit it, and test that ConsumerProcesses is an empty list
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2012-02-28 00:00 UTC", "2016-02-28 00:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 09:37 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(0, -20)");
-        inFeed.setEnd("now(0,0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-02-28 09:40 UTC"), cluster);
-
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        String[] consumers = {"2012-02-28 09:47 UTC", "2012-02-28 09:57 UTC"};
-        for (String d : consumers) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                    getDate(d), EntityType.PROCESS);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expected.add(i);
-        }
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumersOutOfValidityRange() throws Exception {
-        //create a feed, submit it, and test that ConsumerProcesses is an empty list
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2010-02-28 00:00 UTC", "2016-02-28 00:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 09:37 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(0, -20)");
-        inFeed.setEnd("now(0,0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2010-02-28 09:40 UTC"), cluster);
-        Assert.assertEquals(result.size(), 0);
-    }
-
-    @Test
-    public void testGetConsumersLargeOffsetShortValidity() throws Exception {
-        //create a feed, submit it, and test that ConsumerProcesses is an empty list
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "minutes(5)", "2010-02-28 00:00 UTC", "2016-02-28 00:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "minutes(10)", "2012-02-28 09:37 UTC", "2012-02-28 09:47 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("today(-2, 0)");
-        inFeed.setEnd("now(0,0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-02-28 09:35 UTC"), cluster);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        SchedulableEntityInstance consumer = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                getDate("2012-02-28 09:37 UTC"), EntityType.PROCESS);
-        consumer.setTags(SchedulableEntityInstance.INPUT);
-        expected.add(consumer);
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetMultipleConsumerInstances() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)", "2012-02-27 00:00 UTC", "2016-02-28 00:00 UTC");
-        Process process = prepareProcess(cluster, "hours(1)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(-4, 30)");
-        inFeed.setEnd("now(4, 30)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-02-28 09:00 UTC"), cluster);
-        Assert.assertEquals(result.size(), 9);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        String[] consumers = { "2012-02-28 05:00 UTC", "2012-02-28 06:00 UTC", "2012-02-28 07:00 UTC",
-            "2012-02-28 08:00 UTC", "2012-02-28 09:00 UTC", "2012-02-28 10:00 UTC", "2012-02-28 11:00 UTC",
-            "2012-02-28 12:00 UTC", "2012-02-28 13:00 UTC", };
-        for (String d : consumers) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                    getDate(d), EntityType.PROCESS);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expected.add(i);
-        }
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumerWithVariableEnd() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)", "2012-02-27 00:00 UTC", "2016-02-28 00:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "hours(1)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("today(0, 0)");
-        inFeed.setEnd("now(0, 0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-02-28 00:00 UTC"), cluster);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        String[] consumers =  {"2012-02-28 11:00 UTC", "2012-02-28 16:00 UTC", "2012-02-28 18:00 UTC",
-            "2012-02-28 20:00 UTC", "2012-02-28 13:00 UTC", "2012-02-28 03:00 UTC", "2012-02-28 04:00 UTC",
-            "2012-02-28 06:00 UTC", "2012-02-28 05:00 UTC", "2012-02-28 17:00 UTC", "2012-02-28 00:00 UTC",
-            "2012-02-28 23:00 UTC", "2012-02-28 21:00 UTC", "2012-02-28 15:00 UTC", "2012-02-28 22:00 UTC",
-            "2012-02-28 14:00 UTC", "2012-02-28 08:00 UTC", "2012-02-28 12:00 UTC", "2012-02-28 02:00 UTC",
-            "2012-02-28 01:00 UTC", "2012-02-28 19:00 UTC", "2012-02-28 10:00 UTC", "2012-02-28 09:00 UTC",
-            "2012-02-28 07:00 UTC", };
-        for (String d : consumers) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                    getDate(d), EntityType.PROCESS);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expected.add(i);
-        }
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumerWithVariableStart() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)", "2012-02-27 00:00 UTC", "2016-02-28 00:00 UTC");
-
-        //create a consumer Process and submit it, assert that this is returned in ConsumerProcesses
-        Process process = prepareProcess(cluster, "hours(1)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("now(0, 0)");
-        inFeed.setEnd("today(24, 0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-03-28 00:00 UTC"), cluster);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        String[] consumers =  {"2012-03-27 16:00 UTC", "2012-03-27 01:00 UTC", "2012-03-27 10:00 UTC",
-            "2012-03-27 03:00 UTC", "2012-03-27 08:00 UTC", "2012-03-27 07:00 UTC", "2012-03-27 19:00 UTC",
-            "2012-03-27 22:00 UTC", "2012-03-27 12:00 UTC", "2012-03-27 20:00 UTC", "2012-03-27 09:00 UTC",
-            "2012-03-27 04:00 UTC", "2012-03-27 14:00 UTC", "2012-03-27 05:00 UTC", "2012-03-27 23:00 UTC",
-            "2012-03-27 17:00 UTC", "2012-03-27 13:00 UTC", "2012-03-27 18:00 UTC", "2012-03-27 15:00 UTC",
-            "2012-03-28 00:00 UTC", "2012-03-27 02:00 UTC", "2012-03-27 11:00 UTC", "2012-03-27 21:00 UTC",
-            "2012-03-27 00:00 UTC", "2012-03-27 06:00 UTC", };
-        for (String d : consumers) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                    getDate(d), EntityType.PROCESS);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expected.add(i);
-        }
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testGetConsumerWithLatest() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)", "2012-02-27 00:00 UTC", "2016-02-28 00:00 UTC");
-        Process process = prepareProcess(cluster, "hours(1)", "2012-02-27 10:00 UTC", "2016-02-28 10:00 UTC");
-        Inputs inputs = new Inputs();
-        Input inFeed = new Input();
-        inFeed.setName("inputFeed");
-        inFeed.setFeed(feed.getName());
-        inFeed.setStart("today(0, 0)");
-        inFeed.setEnd("latest(0)");
-        inputs.getInputs().add(inFeed);
-        process.setInputs(inputs);
-        store.publish(EntityType.PROCESS, process);
-
-        Set<SchedulableEntityInstance> result = FeedHelper.getConsumerInstances(feed,
-                getDate("2012-02-28 00:00 UTC"), cluster);
-        Set<SchedulableEntityInstance> expected = new HashSet<>();
-        String[] consumers =  {"2012-02-28 23:00 UTC", "2012-02-28 04:00 UTC", "2012-02-28 10:00 UTC",
-            "2012-02-28 07:00 UTC", "2012-02-28 17:00 UTC", "2012-02-28 13:00 UTC", "2012-02-28 05:00 UTC",
-            "2012-02-28 22:00 UTC", "2012-02-28 03:00 UTC", "2012-02-28 21:00 UTC", "2012-02-28 11:00 UTC",
-            "2012-02-28 20:00 UTC", "2012-02-28 06:00 UTC", "2012-02-28 01:00 UTC", "2012-02-28 14:00 UTC",
-            "2012-02-28 00:00 UTC", "2012-02-28 18:00 UTC", "2012-02-28 12:00 UTC", "2012-02-28 16:00 UTC",
-            "2012-02-28 09:00 UTC", "2012-02-28 15:00 UTC", "2012-02-28 19:00 UTC", "2012-02-28 08:00 UTC",
-            "2012-02-28 02:00 UTC", };
-        for (String d : consumers) {
-            SchedulableEntityInstance i = new SchedulableEntityInstance(process.getName(), cluster.getName(),
-                    getDate(d), EntityType.PROCESS);
-            i.setTags(SchedulableEntityInstance.INPUT);
-            expected.add(i);
-        }
-        Assert.assertEquals(result, expected);
-    }
-
-    @Test
-    public void testIsLifeCycleEnabled() throws Exception {
-        Feed feed = new Feed();
-
-        // lifecycle is not defined
-        Clusters clusters = new Clusters();
-        org.apache.falcon.entity.v0.feed.Cluster cluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        cluster.setName("cluster1");
-        clusters.getClusters().add(cluster);
-        feed.setClusters(clusters);
-        Assert.assertFalse(FeedHelper.isLifecycleEnabled(feed, cluster.getName()));
-
-        // lifecycle is defined at global level
-        Lifecycle globalLifecycle = new Lifecycle();
-        RetentionStage retentionStage = new RetentionStage();
-        retentionStage.setFrequency(new Frequency("hours(2)"));
-        globalLifecycle.setRetentionStage(retentionStage);
-        feed.setLifecycle(globalLifecycle);
-        Assert.assertTrue(FeedHelper.isLifecycleEnabled(feed, cluster.getName()));
-
-        // lifecycle is defined at both global and cluster level
-        Lifecycle clusterLifecycle = new Lifecycle();
-        retentionStage = new RetentionStage();
-        retentionStage.setFrequency(new Frequency("hours(4)"));
-        clusterLifecycle.setRetentionStage(retentionStage);
-        feed.getClusters().getClusters().get(0).setLifecycle(clusterLifecycle);
-        Assert.assertTrue(FeedHelper.isLifecycleEnabled(feed, cluster.getName()));
-
-        // lifecycle is defined only at cluster level
-        feed.setLifecycle(null);
-        Assert.assertTrue(FeedHelper.isLifecycleEnabled(feed, cluster.getName()));
-    }
-
-    @Test
-    public void testGetRetentionStage() throws Exception {
-        Feed feed = new Feed();
-        feed.setFrequency(new Frequency("days(1)"));
-
-        // retention stage frequency is not defined
-        Lifecycle globalLifecycle = new Lifecycle();
-        RetentionStage globalRetentionStage = new RetentionStage();
-        globalLifecycle.setRetentionStage(globalRetentionStage);
-        feed.setLifecycle(globalLifecycle);
-
-        Clusters clusters = new Clusters();
-        org.apache.falcon.entity.v0.feed.Cluster cluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        cluster.setName("cluster1");
-        clusters.getClusters().add(cluster);
-        feed.setClusters(clusters);
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("days(1)"));
-
-        // lifecycle is defined only at global level
-        globalRetentionStage.setFrequency(new Frequency("hours(2)"));
-        globalLifecycle.setRetentionStage(globalRetentionStage);
-        feed.setLifecycle(globalLifecycle);
-        Assert.assertNotNull(FeedHelper.getRetentionStage(feed, cluster.getName()));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                feed.getLifecycle().getRetentionStage().getFrequency());
-
-        // lifecycle is defined at both global and cluster level
-        Lifecycle clusterLifecycle = new Lifecycle();
-        RetentionStage clusterRetentionStage = new RetentionStage();
-        clusterRetentionStage.setFrequency(new Frequency("hours(4)"));
-        clusterLifecycle.setRetentionStage(clusterRetentionStage);
-        feed.getClusters().getClusters().get(0).setLifecycle(clusterLifecycle);
-        Assert.assertNotNull(FeedHelper.getRetentionStage(feed, cluster.getName()));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                cluster.getLifecycle().getRetentionStage().getFrequency());
-
-        // lifecycle at both levels - retention only at cluster level.
-        feed.getLifecycle().setRetentionStage(null);
-        Assert.assertNotNull(FeedHelper.getRetentionStage(feed, cluster.getName()));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                cluster.getLifecycle().getRetentionStage().getFrequency());
-
-        // lifecycle at both levels - retention only at global level.
-        feed.getLifecycle().setRetentionStage(globalRetentionStage);
-        feed.getClusters().getClusters().get(0).getLifecycle().setRetentionStage(null);
-        Assert.assertNotNull(FeedHelper.getRetentionStage(feed, cluster.getName()));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                feed.getLifecycle().getRetentionStage().getFrequency());
-
-        // lifecycle is defined only at cluster level
-        feed.setLifecycle(null);
-        feed.getClusters().getClusters().get(0).getLifecycle().setRetentionStage(clusterRetentionStage);
-        Assert.assertNotNull(FeedHelper.getRetentionStage(feed, cluster.getName()));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                cluster.getLifecycle().getRetentionStage().getFrequency());
-    }
-
-    @Test
-    public void testGetRetentionFrequency() throws Exception {
-        Feed feed = new Feed();
-        feed.setFrequency(new Frequency("days(10)"));
-
-        // no retention stage frequency defined - test feeds of various frequencies
-        Lifecycle globalLifecycle = new Lifecycle();
-        RetentionStage globalRetentionStage = new RetentionStage();
-        globalLifecycle.setRetentionStage(globalRetentionStage);
-        feed.setLifecycle(globalLifecycle);
-
-        Clusters clusters = new Clusters();
-        org.apache.falcon.entity.v0.feed.Cluster cluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        cluster.setName("cluster1");
-        clusters.getClusters().add(cluster);
-        feed.setClusters(clusters);
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("days(10)"));
-
-        feed.setFrequency(new Frequency("hours(1)"));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("hours(6)"));
-
-        feed.setFrequency(new Frequency("minutes(10)"));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("hours(6)"));
-
-        feed.setFrequency(new Frequency("hours(7)"));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("hours(7)"));
-
-        feed.setFrequency(new Frequency("days(2)"));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("days(2)"));
-
-        // lifecycle at both levels - retention only at global level.
-        feed.setFrequency(new Frequency("hours(1)"));
-        globalRetentionStage.setFrequency(new Frequency("hours(2)"));
-        globalLifecycle.setRetentionStage(globalRetentionStage);
-        feed.setLifecycle(globalLifecycle);
-
-        Lifecycle clusterLifecycle = new Lifecycle();
-        RetentionStage clusterRetentionStage = new RetentionStage();
-        clusterLifecycle.setRetentionStage(clusterRetentionStage);
-        feed.getClusters().getClusters().get(0).setLifecycle(clusterLifecycle);
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("hours(6)"));
-
-        // lifecycle at both levels - retention only at cluster level.
-        feed.getLifecycle().getRetentionStage().setFrequency(null);
-        clusterRetentionStage.setFrequency(new Frequency("hours(4)"));
-        Assert.assertEquals(FeedHelper.getLifecycleRetentionFrequency(feed, cluster.getName()),
-                            new Frequency("hours(4)"));
-    }
-
-    @Test
-    public void testFeedImportSnapshot() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = importFeedSnapshot(cluster, "hours(1)", "2012-02-07 00:00 UTC", "2020-02-25 00:00 UTC");
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
-        Date startInstResult = FeedHelper.getImportInitalInstance(feedCluster);
-        Assert.assertNotNull(feed.getClusters().getClusters());
-        Assert.assertNotNull(feed.getClusters().getClusters().get(0));
-        Assert.assertNotNull(feed.getClusters().getClusters().get(0).getValidity());
-        Assert.assertNotNull(feed.getClusters().getClusters().get(0).getValidity().getStart());
-        Assert.assertNotNull(startInstResult);
-        Assert.assertNotNull(feedCluster.getValidity().getStart());
-        Assert.assertEquals(getDate("2012-02-07 00:00 UTC"), feedCluster.getValidity().getStart());
-        Assert.assertTrue(FeedHelper.isImportEnabled(feedCluster));
-        Assert.assertEquals(MergeType.SNAPSHOT, FeedHelper.getImportMergeType(feedCluster));
-        Assert.assertEquals(startInstResult, feedCluster.getValidity().getStart());
-    }
-
-    @Test
-    public void testFeedImportFields() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = importFeedSnapshot(cluster, "hours(1)", "2012-02-07 00:00 UTC", "2020-02-25 00:00 UTC");
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
-        Date startInstResult = FeedHelper.getImportInitalInstance(feedCluster);
-        List<String> fieldList = FeedHelper.getImportFieldList(feedCluster);
-        Assert.assertEquals(2, fieldList.size());
-        Assert.assertFalse(FeedHelper.isFieldExcludes(feedCluster.getImport().getSource()));
-    }
-
-    @Test
-    public void testFeedImportAppend() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = importFeedAppend(cluster, "hours(1)", "2012-02-07 00:00 UTC", "2020-02-25 00:00 UTC");
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
-        Date startInstResult = FeedHelper.getImportInitalInstance(feedCluster);
-        Assert.assertEquals(startInstResult, feed.getClusters().getClusters().get(0).getValidity().getStart());
-    }
-
-    @Test
-    public void testGetFeedClusterValidity() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)",  "2012-02-07 00:00 UTC", "2020-02-25 00:00 UTC");
-        Validity validity = FeedHelper.getClusterValidity(feed, cluster.getName());
-        Assert.assertEquals(validity.getStart(), getDate("2012-02-07 00:00 UTC"));
-        Assert.assertEquals(validity.getEnd(), getDate("2020-02-25 00:00 UTC"));
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testGetClusterValidityInvalidCluster() throws Exception {
-        Cluster cluster = publishCluster();
-        Feed feed = publishFeed(cluster, "hours(1)",  "2012-02-07 00:00 UTC", "2020-02-25 00:00 UTC");
-        FeedHelper.getClusterValidity(feed, "abracadabra");
-    }
-
-    private Validity getFeedValidity(String start, String end) throws ParseException {
-        Validity validity = new Validity();
-        validity.setStart(getDate(start));
-        validity.setEnd(getDate(end));
-        return validity;
-    }
-
-    private org.apache.falcon.entity.v0.process.Validity getProcessValidity(String start, String end) throws
-            ParseException {
-
-        org.apache.falcon.entity.v0.process.Validity validity = new org.apache.falcon.entity.v0.process.Validity();
-        validity.setStart(getDate(start));
-        validity.setEnd(getDate(end));
-        return validity;
-    }
-
-    private Date getDate(String dateString) throws ParseException {
-        DateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm Z");
-        return format.parse(dateString);
-    }
-
-    private Cluster publishCluster() throws FalconException {
-        Cluster cluster = new Cluster();
-        cluster.setName("feedCluster");
-        cluster.setColo("colo");
-        store.publish(EntityType.CLUSTER, cluster);
-        return cluster;
-    }
-
-    private Feed publishFeed(Cluster cluster, String frequency, String start, String end)
-        throws FalconException, ParseException {
-        return publishFeed(cluster, frequency, start, end, null);
-    }
-
-    private Feed publishFeed(Cluster cluster, String frequency, String start, String end, Import imp)
-        throws FalconException, ParseException {
-
-        Feed feed = new Feed();
-        feed.setName("feed");
-        Frequency f = new Frequency(frequency);
-        feed.setFrequency(f);
-        feed.setTimezone(UTC);
-        Clusters fClusters = new Clusters();
-        org.apache.falcon.entity.v0.feed.Cluster fCluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        fCluster.setType(ClusterType.SOURCE);
-        fCluster.setImport(imp);
-        fCluster.setName(cluster.getName());
-        fCluster.setValidity(getFeedValidity(start, end));
-        fClusters.getClusters().add(fCluster);
-        feed.setClusters(fClusters);
-        store.publish(EntityType.FEED, feed);
-
-        return feed;
-    }
-
-    private Process prepareProcess(Cluster cluster, String frequency, String start, String end) throws ParseException {
-        Process process = new Process();
-        process.setName("process");
-        process.setTimezone(UTC);
-        org.apache.falcon.entity.v0.process.Clusters pClusters = new org.apache.falcon.entity.v0.process.Clusters();
-        org.apache.falcon.entity.v0.process.Cluster pCluster = new org.apache.falcon.entity.v0.process.Cluster();
-        pCluster.setName(cluster.getName());
-        org.apache.falcon.entity.v0.process.Validity validity = getProcessValidity(start, end);
-        pCluster.setValidity(validity);
-        pClusters.getClusters().add(pCluster);
-        process.setClusters(pClusters);
-        Frequency f = new Frequency(frequency);
-        process.setFrequency(f);
-        return process;
-    }
-
-    private Feed importFeedSnapshot(Cluster cluster, String frequency, String start, String end)
-        throws FalconException, ParseException {
-
-        Import imp = getAnImport(MergeType.SNAPSHOT);
-        Feed feed = publishFeed(cluster, frequency, start, end, imp);
-        return feed;
-    }
-
-    private Feed importFeedAppend(Cluster cluster, String frequency, String start, String end)
-        throws FalconException, ParseException {
-
-        Import imp = getAnImport(MergeType.APPEND);
-        Feed feed = publishFeed(cluster, frequency, start, end, imp);
-        return feed;
-    }
-
-    private Import getAnImport(MergeType mergeType) {
-        Extract extract = new Extract();
-        extract.setType(ExtractMethod.FULL);
-        extract.setMergepolicy(mergeType);
-
-        FieldIncludeExclude fieldInclude = new FieldIncludeExclude();
-        fieldInclude.getFields().add("id");
-        fieldInclude.getFields().add("name");
-        FieldsType fields = new FieldsType();
-        fields.setIncludes(fieldInclude);
-
-        Datasource source = new Datasource();
-        source.setName("test-db");
-        source.setTableName("test-table");
-        source.setExtract(extract);
-        source.setFields(fields);
-
-        Argument a1 = new Argument();
-        a1.setName("--split_by");
-        a1.setValue("id");
-        Argument a2 = new Argument();
-        a2.setName("--num-mappers");
-        a2.setValue("2");
-        Arguments args = new Arguments();
-        List<Argument> argList = args.getArguments();
-        argList.add(a1);
-        argList.add(a2);
-
-        Import imp = new Import();
-        imp.setSource(source);
-        imp.setArguments(args);
-        return imp;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/FileSystemStorageTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/FileSystemStorageTest.java b/common/src/test/java/org/apache/falcon/entity/FileSystemStorageTest.java
deleted file mode 100644
index 30edd94..0000000
--- a/common/src/test/java/org/apache/falcon/entity/FileSystemStorageTest.java
+++ /dev/null
@@ -1,534 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.AccessControlList;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.feed.Cluster;
-import org.apache.falcon.entity.v0.feed.Clusters;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.feed.Validity;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.List;
-import java.util.Properties;
-import java.util.Random;
-import java.util.TimeZone;
-
-/**
- * Test class for File System Storage.
- */
-public class FileSystemStorageTest {
-
-    private static final String USER = FalconTestUtil.TEST_USER_1;
-
-    @BeforeClass
-    public void setUp() {
-        CurrentUser.authenticate(USER);
-    }
-
-    @Test
-    public void testGetType() throws Exception {
-        final Location location = new Location();
-        location.setPath("/foo/bar");
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage(FileSystemStorage.FILE_SYSTEM_URL, locations);
-        Assert.assertEquals(storage.getType(), Storage.TYPE.FILESYSTEM);
-    }
-
-    @Test
-    public void testCreateFromUriTemplate() throws Exception {
-        String feedBasePath = "DATA=hdfs://localhost:8020"
-                + "/data/YYYY/feed1/mmHH/dd/MM/${YEAR}-${MONTH}-${DAY}/more/${YEAR}"
-                + "#"
-                + "META=hdfs://localhost:8020"
-                + "/meta/YYYY/feed1/mmHH/dd/MM/${YEAR}-${MONTH}-${DAY}/more/${YEAR}"
-                + "#"
-                + "STATS=hdfs://localhost:8020"
-                + "/stats/YYYY/feed1/mmHH/dd/MM/${YEAR}-${MONTH}-${DAY}/more/${YEAR}";
-
-        FileSystemStorage storage = new FileSystemStorage(feedBasePath);
-        Assert.assertEquals(storage.getUriTemplate(), feedBasePath);
-
-        Assert.assertEquals("hdfs://localhost:8020", storage.getStorageUrl());
-        Assert.assertEquals("hdfs://localhost:8020/data/YYYY/feed1/mmHH/dd/MM/${YEAR}-${MONTH}-${DAY}/more/${YEAR}",
-            storage.getUriTemplate(LocationType.DATA));
-        Assert.assertEquals("hdfs://localhost:8020/stats/YYYY/feed1/mmHH/dd/MM/${YEAR}-${MONTH}-${DAY}/more/${YEAR}",
-                storage.getUriTemplate(LocationType.STATS));
-        Assert.assertEquals("hdfs://localhost:8020/meta/YYYY/feed1/mmHH/dd/MM/${YEAR}-${MONTH}-${DAY}/more/${YEAR}",
-                storage.getUriTemplate(LocationType.META));
-    }
-
-    @Test
-    public void testGetUriTemplateForData() throws Exception {
-        final Location location = new Location();
-        location.setPath("/foo/bar");
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage("jail://global:00", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), "jail://global:00/foo/bar");
-    }
-
-    @Test
-    public void testFSHomeDir() {
-        final Location location = new Location();
-        location.setPath("foo/bar"); // relative path
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage("hdfs://localhost:41020", locations);
-        Assert.assertEquals(storage.getWorkingDir().toString(), "/user/" + FalconTestUtil.TEST_USER_1);
-    }
-
-    @Test
-    public void testGetUriTemplateForDataWithRelativePath() throws Exception {
-        final Location location = new Location();
-        location.setPath("foo/bar"); // relative path
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage("hdfs://localhost:41020", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA),
-                "hdfs://localhost:41020/user/" + USER + "/foo/bar");
-
-        storage = new FileSystemStorage("hdfs://localhost:41020/", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA),
-                "hdfs://localhost:41020/user/" + USER + "/foo/bar");
-    }
-
-    @Test
-    public void testGetUriTemplateForDataWithAbsolutePath() throws Exception {
-        final Location location = new Location();
-        location.setPath("/foo/bar"); // absolute path
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage("hdfs://localhost:41020", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), "hdfs://localhost:41020/foo/bar");
-
-        storage = new FileSystemStorage("hdfs://localhost:41020/", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), "hdfs://localhost:41020/foo/bar");
-    }
-
-    @Test
-    public void testGetUriTemplateForDataWithAbsoluteURL() throws Exception {
-        final String absoluteUrl = "s3://host:1000/foo/bar";
-        final Location location = new Location();
-        location.setPath(absoluteUrl); // absolute url
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage("hdfs://localhost:41020", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), absoluteUrl);
-
-        storage = new FileSystemStorage("hdfs://localhost:41020/", locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), absoluteUrl);
-    }
-
-    @Test
-    public void testValidateACL() throws Exception {
-        final Location location = new Location();
-        Path path = new Path("/foo/bar");
-        location.setPath(path.toString());
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        String user = System.getProperty("user.name");
-        EmbeddedCluster cluster = EmbeddedCluster.newCluster(user);
-        FileSystem fs = cluster.getFileSystem();
-        fs.mkdirs(path);
-
-        FileSystemStorage storage = new FileSystemStorage(
-                cluster.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY), locations);
-        storage.validateACL(new TestACL(user, user, "0x755"));
-
-        //-ve case
-        try {
-            storage.validateACL(new TestACL("random", user, "0x755"));
-            Assert.fail("Validation should have failed");
-        } catch(FalconException e) {
-            //expected exception
-        }
-
-        //Timed path
-        location.setPath("/foo/bar/${YEAR}/${MONTH}/${DAY}");
-        storage.validateACL(new TestACL(user, user, "rrr"));
-
-        //-ve case
-        try {
-            storage.validateACL(new TestACL("random", user, "0x755"));
-            Assert.fail("Validation should have failed");
-        } catch(FalconException e) {
-            //expected exception
-        }
-    }
-
-    @DataProvider(name = "locationTestWithRelativePathDataProvider")
-    private Object[][] createLocationTestDataWithRelativePath() {
-        return new Object[][] {
-            {"hdfs://h:0", "localDC/rc/billing/ua2", "hdfs://h:0/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"hdfs://h:0/", "localDC/rc/billing/ua2", "hdfs://h:0/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"hdfs://h:0", "localDC/rc/billing/ua2/", "hdfs://h:0/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"hdfs://h:0/", "localDC/rc/billing/ua2/", "hdfs://h:0/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"hdfs://h:0", "localDC/rc/billing/ua2//", "hdfs://h:0/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"hdfs://h:0/", "localDC/rc/billing/ua2//", "hdfs://h:0/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "localDC/rc/billing/ua2", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}/", "localDC/rc/billing/ua2", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "localDC/rc/billing/ua2/", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}/", "localDC/rc/billing/ua2/", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "localDC/rc/billing/ua2//", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}/", "localDC/rc/billing/ua2//", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}/", "localDC/rc/billing/ua2//", "${nameNode}/user/" + USER + "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "s3://h:p/localDC/rc/billing/ua2//", "s3://h:p/localDC/rc/billing/ua2"},
-            {"${nameNode}/", "s3://h:p/localDC/rc/billing/ua2//", "s3://h:p/localDC/rc/billing/ua2"},
-            {"hdfs://h:0", "s3://h:p/localDC/rc/billing/ua2//", "s3://h:p/localDC/rc/billing/ua2"},
-            {"hdfs://h:0/", "s3://h:p/localDC/rc/billing/ua2//", "s3://h:p/localDC/rc/billing/ua2"},
-        };
-    }
-
-    @Test (dataProvider = "locationTestWithRelativePathDataProvider")
-    public void testGetUriTemplateWithRelativePath(String storageUrl, String path,
-                                                   String expected) throws Exception {
-        final Location location = new Location();
-        location.setPath(path);
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage(storageUrl, locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), expected);
-    }
-
-    @Test
-    public void testGetUriTemplate() throws Exception {
-        final Location dataLocation = new Location();
-        dataLocation.setPath("/data/foo/bar");
-        dataLocation.setType(LocationType.DATA);
-
-        final Location metaLocation = new Location();
-        metaLocation.setPath("/meta/foo/bar");
-        metaLocation.setType(LocationType.META);
-
-        final Location statsLocation = new Location();
-        statsLocation.setPath("/stats/foo/bar");
-        statsLocation.setType(LocationType.STATS);
-
-        final Location tmpLocation = new Location();
-        tmpLocation.setPath("/tmp/foo/bar");
-        tmpLocation.setType(LocationType.TMP);
-
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(dataLocation);
-        locations.add(metaLocation);
-        locations.add(statsLocation);
-        locations.add(tmpLocation);
-
-        StringBuilder expected = new StringBuilder();
-        expected.append(LocationType.DATA)
-                .append(FileSystemStorage.LOCATION_TYPE_SEP)
-                .append("jail://global:00/data/foo/bar")
-                .append(FileSystemStorage.FEED_PATH_SEP)
-                .append(LocationType.META)
-                .append(FileSystemStorage.LOCATION_TYPE_SEP)
-                .append("jail://global:00/meta/foo/bar")
-                .append(FileSystemStorage.FEED_PATH_SEP)
-                .append(LocationType.STATS)
-                .append(FileSystemStorage.LOCATION_TYPE_SEP)
-                .append("jail://global:00/stats/foo/bar")
-                .append(FileSystemStorage.FEED_PATH_SEP)
-                .append(LocationType.TMP)
-                .append(FileSystemStorage.LOCATION_TYPE_SEP)
-                .append("jail://global:00/tmp/foo/bar");
-
-        FileSystemStorage storage = new FileSystemStorage("jail://global:00", locations);
-        Assert.assertEquals(storage.getUriTemplate(), expected.toString());
-    }
-
-    @Test
-    public void testGetUriTemplateWithOutStorageURL() throws Exception {
-        final Location location = new Location();
-        location.setPath("/foo/bar");
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage(FileSystemStorage.FILE_SYSTEM_URL, locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), "${nameNode}/foo/bar");
-    }
-
-    @DataProvider(name = "locationTestDataProvider")
-    private Object[][] createLocationTestData() {
-        return new Object[][] {
-            {"jail://global:00", "/localDC/rc/billing/ua2", "/localDC/rc/billing/ua2"},
-            {"jail://global:00", "/localDC/rc/billing/ua2/", "/localDC/rc/billing/ua2"},
-            {"jail://global:00", "/localDC/rc/billing/ua2//", "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "/localDC/rc/billing/ua2", "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "/localDC/rc/billing/ua2/", "/localDC/rc/billing/ua2"},
-            {"${nameNode}", "/localDC/rc/billing/ua2//", "/localDC/rc/billing/ua2"},
-        };
-    }
-
-    @Test (dataProvider = "locationTestDataProvider")
-    public void testGetUriTemplateWithLocationType(String storageUrl, String path,
-                                                   String expected) throws Exception {
-        final Location location = new Location();
-        location.setPath(path);
-        location.setType(LocationType.DATA);
-        List<Location> locations = new ArrayList<Location>();
-        locations.add(location);
-
-        FileSystemStorage storage = new FileSystemStorage(storageUrl, locations);
-        Assert.assertEquals(storage.getUriTemplate(LocationType.DATA), storageUrl + expected);
-    }
-
-    @Test
-    public void testIsIdentical() throws Exception {
-        final String storageUrl = "jail://global:00";
-        final Location location1 = new Location();
-        location1.setPath("/foo/bar");
-        location1.setType(LocationType.DATA);
-        List<Location> locations1 = new ArrayList<Location>();
-        locations1.add(location1);
-        FileSystemStorage storage1 = new FileSystemStorage(storageUrl, locations1);
-
-        final Location location2 = new Location();
-        location2.setPath("/foo/bar");
-        location2.setType(LocationType.DATA);
-        List<Location> locations2 = new ArrayList<Location>();
-        locations2.add(location2);
-        FileSystemStorage storage2 = new FileSystemStorage(storageUrl, locations2);
-
-        Assert.assertTrue(storage1.isIdentical(storage2));
-    }
-
-    @Test
-    public void testIsIdenticalNegative() throws Exception {
-        final String storageUrl = "jail://global:00";
-        final Location location1 = new Location();
-        location1.setPath("/foo/baz");
-        location1.setType(LocationType.DATA);
-        List<Location> locations1 = new ArrayList<Location>();
-        locations1.add(location1);
-        FileSystemStorage storage1 = new FileSystemStorage(storageUrl, locations1);
-
-        final Location location2 = new Location();
-        location2.setPath("/foo/bar");
-        location2.setType(LocationType.DATA);
-        List<Location> locations2 = new ArrayList<Location>();
-        locations2.add(location2);
-        FileSystemStorage storage2 = new FileSystemStorage(storageUrl, locations2);
-
-        Assert.assertFalse(storage1.isIdentical(storage2));
-    }
-
-    private class TestACL extends AccessControlList {
-
-        /**
-         * owner is the Owner of this entity.
-         */
-        private String owner;
-
-        /**
-         * group is the one which has access to read - not used at this time.
-         */
-        private String group;
-
-        /**
-         * permission is not enforced at this time.
-         */
-        private String permission;
-
-        TestACL(String owner, String group, String permission) {
-            this.owner = owner;
-            this.group = group;
-            this.permission = permission;
-        }
-
-        @Override
-        public String getOwner() {
-            return owner;
-        }
-
-        @Override
-        public String getGroup() {
-            return group;
-        }
-
-        @Override
-        public String getPermission() {
-            return permission;
-        }
-    }
-
-    @DataProvider(name = "testListingDataProvider")
-    private Object[][] createTestListingData() {
-        final long millis = 24L * 3600 * 1000;
-        final long now = System.currentTimeMillis();
-        TimeZone utc = TimeZone.getTimeZone("UTC");
-        return new Object[][] {
-            {null, Frequency.fromString("hours(2)"), utc, new Date(now - 60 * millis), new Date(now - 56 * millis)},
-            {null, Frequency.fromString("days(1)"), utc, new Date(now - 20 * millis), new Date(now + 6 * millis)},
-            {null, Frequency.fromString("months(1)"), utc, new Date(now - 85 * millis), new Date(now - 10 * millis)},
-        };
-    }
-
-    @Test (dataProvider = "testListingDataProvider")
-    public void testListing(String availabilityFlag, Frequency frequency, TimeZone timeZone,
-                            Date start, Date end) throws Exception {
-        EmbeddedCluster cluster = EmbeddedCluster.newCluster("TestFeedListing", false);
-        FileSystem fs = cluster.getFileSystem();
-        ConfigurationStore.get().publish(EntityType.CLUSTER, cluster.getCluster());
-        try {
-            Feed feed = getFeed(availabilityFlag, frequency, timeZone);
-            List<FeedInstanceStatus> expected = prepareData(fs, feed, start, end);
-            FileSystemStorage fileSystemStorage = new FileSystemStorage(cluster.getFileSystem().
-                    getUri().toString(), feed.getLocations());
-            List<FeedInstanceStatus> actual = fileSystemStorage.
-                    getListing(feed, "TestFeedListing", LocationType.DATA, start, end);
-            Assert.assertEquals(actual, expected, "Feed instance Listings doesn't match");
-        } finally {
-            ConfigurationStore.get().remove(EntityType.CLUSTER, cluster.getCluster().getName());
-        }
-    }
-
-    @SuppressWarnings("MagicConstant")
-    private List<FeedInstanceStatus> prepareData(FileSystem fs, Feed feed,
-                                                 Date start, Date end) throws Exception {
-        fs.delete(new Path("/TestFeedListing"), true);
-        Random random = new Random();
-        List<FeedInstanceStatus> instances = new ArrayList<FeedInstanceStatus>();
-        String basePath = feed.getLocations().getLocations().get(0).getPath();
-        Frequency frequency = feed.getFrequency();
-        TimeZone tz = feed.getTimezone();
-        Date dataStart = EntityUtil.getNextStartTime(feed.getClusters().getClusters().get(0).getValidity().getStart(),
-                feed.getFrequency(), tz, new Date(start.getTime()));
-        Date dataEnd = new Date(end.getTime());
-        while (dataStart.before(dataEnd)) {
-            Properties properties = ExpressionHelper.getTimeVariables(dataStart, tz);
-            String path = ExpressionHelper.substitute(basePath, properties);
-            FeedInstanceStatus instance = new FeedInstanceStatus(path);
-            instance.setStatus(FeedInstanceStatus.AvailabilityStatus.MISSING);
-            instance.setSize(-1);
-            instance.setCreationTime(0);
-            Date date = FeedHelper.getDate(basePath, new Path(path), tz);
-            instance.setInstance(SchemaHelper.formatDateUTC(date));
-            Calendar cal = Calendar.getInstance();
-            cal.setTime(dataStart);
-            cal.add(frequency.getTimeUnit().getCalendarUnit(), frequency.getFrequencyAsInt());
-            dataStart.setTime(cal.getTimeInMillis());
-            if (random.nextBoolean()) {
-                OutputStream out = fs.create(new Path(path, "file"));
-                out.write("Hello World\n".getBytes());
-                out.close();
-                instance.setSize(12);
-                if (feed.getAvailabilityFlag() == null
-                        || (feed.getAvailabilityFlag() != null && random.nextBoolean())) {
-                    //If availability is not present or if ok to create availability file, mark as available
-                    instance.setStatus(FeedInstanceStatus.AvailabilityStatus.AVAILABLE);
-                    if (feed.getAvailabilityFlag() != null) {
-                        fs.create(new Path(path, feed.getAvailabilityFlag())).close();
-                    }
-                } else if (feed.getAvailabilityFlag() != null) {
-                    //If availability flag is present but it was not ok to create the availability file, mark as partial
-                    fs.mkdirs(new Path(path));
-                    instance.setStatus(FeedInstanceStatus.AvailabilityStatus.PARTIAL);
-                }
-            } else {
-                if (feed.getAvailabilityFlag() == null && random.nextBoolean()) {
-                    //If availability flag is not present and ok to create the dir, mark as empty
-                    fs.mkdirs(new Path(path));
-                    instance.setStatus(FeedInstanceStatus.AvailabilityStatus.EMPTY);
-                    instance.setSize(0);
-                } else if (feed.getAvailabilityFlag() != null && random.nextBoolean()) {
-                    //If availability is present and ok to create dir, mark as partial
-                    fs.mkdirs(new Path(path));
-                    instance.setStatus(FeedInstanceStatus.AvailabilityStatus.PARTIAL);
-                } else if (feed.getAvailabilityFlag() != null)  {
-                    //If availability is present and ok to create empty instance
-                    fs.create(new Path(path, feed.getAvailabilityFlag())).close();
-                    instance.setStatus(FeedInstanceStatus.AvailabilityStatus.EMPTY);
-                    instance.setSize(0);
-                }
-            }
-            try {
-                FileStatus fileStatus = fs.getFileStatus(new Path(path));
-                instance.setCreationTime(fileStatus.getModificationTime());
-            } catch (IOException e) {
-                //ignore
-            }
-            instances.add(instance);
-        }
-        return instances;
-    }
-
-    private Feed getFeed(String availabilityFlag, Frequency frequency, TimeZone timeZone) {
-        Feed feed = new Feed();
-        feed.setAvailabilityFlag(availabilityFlag);
-        feed.setFrequency(frequency);
-        feed.setTimezone(timeZone);
-        feed.setLocations(new Locations());
-        Location dataLocation = new Location();
-        feed.getLocations().getLocations().add(dataLocation);
-        dataLocation.setPath("/TestFeedListing/data/${YEAR}/${MONTH}/${DAY}"
-                + (frequency.getTimeUnit() == Frequency.TimeUnit.hours ? "/${HOUR}" : "") + "/MORE");
-        dataLocation.setType(LocationType.DATA);
-        feed.setClusters(new Clusters());
-        Cluster cluster = new Cluster();
-        cluster.setName("TestFeedListing");
-        feed.getClusters().getClusters().add(cluster);
-        Validity validity = new Validity();
-        cluster.setValidity(validity);
-        validity.setStart(new Date(System.currentTimeMillis() - (1000L * 24 * 3600000)));
-        validity.setEnd(new Date(System.currentTimeMillis() + (1000L * 24 * 3600000)));
-        return feed;
-    }
-}
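
The generator above encodes the feed-instance availability rules only implicitly, through random branches. For readers skimming the removed code, a minimal sketch of the same decision table follows; the class, method, and MISSING status are hypothetical illustrations, not Falcon's FeedHelper or FeedInstanceStatus API. An instance with data is AVAILABLE when no availability flag is required or the flag file is present, and PARTIAL otherwise; an instance without data is PARTIAL when a required flag is missing from an existing directory, and EMPTY otherwise.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Hypothetical helper mirroring the availability rules the removed generator encodes. */
    final class InstanceAvailability {

        enum Status { AVAILABLE, PARTIAL, EMPTY, MISSING }

        /**
         * Classifies a single feed-instance directory.
         *
         * @param fs               file system holding the instance
         * @param instancePath     directory of one instance, e.g. .../data/2016/03/01/MORE
         * @param availabilityFlag name of the "done" marker file, or null if the feed defines none
         */
        static Status classify(FileSystem fs, Path instancePath, String availabilityFlag)
                throws IOException {
            if (!fs.exists(instancePath)) {
                return Status.MISSING;                    // nothing was ever written for this instance
            }
            boolean flagRequired = availabilityFlag != null;
            boolean flagPresent = flagRequired && fs.exists(new Path(instancePath, availabilityFlag));

            long dataBytes = 0;
            for (FileStatus status : fs.listStatus(instancePath)) {
                if (!flagRequired || !status.getPath().getName().equals(availabilityFlag)) {
                    dataBytes += status.getLen();         // count everything except the marker file
                }
            }
            if (dataBytes > 0) {
                // Data exists: complete only when no marker is required or the marker is present.
                return (!flagRequired || flagPresent) ? Status.AVAILABLE : Status.PARTIAL;
            }
            // No data yet: a required-but-missing marker on an existing directory is PARTIAL;
            // an empty directory, or a marker with no data, counts as EMPTY.
            return (flagRequired && !flagPresent) ? Status.PARTIAL : Status.EMPTY;
        }

        private InstanceAvailability() {
        }
    }
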

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/HiveUtilTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/HiveUtilTest.java b/common/src/test/java/org/apache/falcon/entity/HiveUtilTest.java
deleted file mode 100644
index c37cebd..0000000
--- a/common/src/test/java/org/apache/falcon/entity/HiveUtilTest.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import junit.framework.Assert;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfaces;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.security.SecurityUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.testng.annotations.Test;
-
-import java.util.Properties;
-
-/**
- * Tests for HiveUtil.
- */
-public class HiveUtilTest {
-
-    @Test
-    public void testGetHiveCredentialsWithoutKerberos() {
-        StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, PseudoAuthenticationHandler.TYPE);
-        Cluster cluster = new Cluster();
-        String metaStoreUrl = "thrift://localhost:19083";
-
-        // set registry interface
-        Interfaces interfaces = new Interfaces();
-        Interface registry = new Interface();
-        registry.setEndpoint(metaStoreUrl);
-        registry.setType(Interfacetype.REGISTRY);
-        registry.setVersion("0.1");
-        interfaces.getInterfaces().add(registry);
-        cluster.setInterfaces(interfaces);
-
-        Properties expected = new Properties();
-        expected.put(HiveUtil.METASTORE_UGI, "true");
-        expected.put(HiveUtil.NODE, metaStoreUrl.replace("thrift", "hcat"));
-        expected.put(HiveUtil.METASTROE_URI, metaStoreUrl);
-        expected.put(HiveUtil.METASTOREURIS, metaStoreUrl);
-
-        Properties actual = HiveUtil.getHiveCredentials(cluster);
-        Assert.assertTrue(actual.equals(expected));
-    }
-
-    @Test
-    public void testGetHiveCredentialsWithKerberos() {
-        StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, KerberosAuthenticationHandler.TYPE);
-        Cluster cluster = new Cluster();
-        String metaStoreUrl = "thrift://localhost:19083";
-        String principal = "kerberosPrincipal";
-
-        // set registry interface
-        Interfaces interfaces = new Interfaces();
-        Interface registry = new Interface();
-        registry.setEndpoint(metaStoreUrl);
-        registry.setType(Interfacetype.REGISTRY);
-        registry.setVersion("0.1");
-        interfaces.getInterfaces().add(registry);
-        cluster.setInterfaces(interfaces);
-
-        // set security properties
-        org.apache.falcon.entity.v0.cluster.Properties props = new org.apache.falcon.entity.v0.cluster.Properties();
-        Property principal2 = new Property();
-        principal2.setName(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
-        principal2.setValue(principal);
-        props.getProperties().add(principal2);
-        cluster.setProperties(props);
-        Properties expected = new Properties();
-        expected.put(SecurityUtil.METASTORE_USE_THRIFT_SASL, "true");
-        expected.put(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL, principal);
-        expected.put(SecurityUtil.METASTORE_PRINCIPAL, principal);
-        expected.put(HiveUtil.METASTORE_UGI, "true");
-        expected.put(HiveUtil.NODE, metaStoreUrl.replace("thrift", "hcat"));
-        expected.put(HiveUtil.METASTROE_URI, metaStoreUrl);
-        expected.put(HiveUtil.METASTOREURIS, metaStoreUrl);
-
-        Properties actual = HiveUtil.getHiveCredentials(cluster);
-        Assert.assertTrue(actual.equals(expected));
-    }
-
-
-}
-
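
The two removed cases in HiveUtilTest build their expected Properties inline and compare them with Assert.assertTrue(actual.equals(expected)); assertEquals would report the mismatching map on failure, and a shared builder makes the asserted mapping easier to read: the registry endpoint is echoed back under several metastore keys, the node URI swaps the thrift scheme for hcat, and the Kerberos case additionally asserts the SASL flag and the metastore principal. A hypothetical helper along those lines (the class and method are illustrative, not part of HiveUtil) might look like:

    import java.util.Properties;

    import org.apache.falcon.entity.HiveUtil;
    import org.apache.falcon.security.SecurityUtil;

    /** Hypothetical test-side builder for the credential properties HiveUtilTest asserts. */
    final class ExpectedHiveCredentials {

        static Properties expectedFor(String metaStoreUrl, String kerberosPrincipal) {
            Properties expected = new Properties();
            // Asserted in both the simple and the Kerberos case.
            expected.put(HiveUtil.METASTORE_UGI, "true");
            expected.put(HiveUtil.NODE, metaStoreUrl.replace("thrift", "hcat"));
            expected.put(HiveUtil.METASTROE_URI, metaStoreUrl);   // constant is spelled this way in HiveUtil
            expected.put(HiveUtil.METASTOREURIS, metaStoreUrl);

            if (kerberosPrincipal != null) {
                // Additional properties asserted only when Kerberos authentication is configured.
                expected.put(SecurityUtil.METASTORE_USE_THRIFT_SASL, "true");
                expected.put(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL, kerberosPrincipal);
                expected.put(SecurityUtil.METASTORE_PRINCIPAL, kerberosPrincipal);
            }
            return expected;
        }

        private ExpectedHiveCredentials() {
        }
    }

Each test would then reduce to wiring the registry interface on the cluster and asserting that HiveUtil.getHiveCredentials(cluster) equals expectedFor(metaStoreUrl, principalOrNull).
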


http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NewRetryTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NewRetryTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NewRetryTest.java
deleted file mode 100644
index 03bc358..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NewRetryTest.java
+++ /dev/null
@@ -1,1182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.MatrixUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.BundleJob;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.CoordinatorJob;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.WorkflowJob;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Tests with Retries.
- */
-@Test(groups = "embedded")
-public class NewRetryTest extends BaseTestClass {
-
-    private static final Logger LOGGER = Logger.getLogger(NewRetryTest.class);
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-
-    private DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd/HH/mm");
-    private final String baseTestDir = cleanAndGetTestDir();
-    private final String aggregateWorkflowDir = baseTestDir + "/aggregator";
-    private final String lateDir = baseTestDir + "/lateDataTest/testFolders";
-    private final String latePath = lateDir + MINUTE_DATE_PATTERN;
-    private DateTime startDate;
-    private DateTime endDate;
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = new Bundle(BundleUtil.readRetryBundle(), cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        startDate = new DateTime(DateTimeZone.UTC).plusMinutes(1);
-        endDate = new DateTime(DateTimeZone.UTC).plusMinutes(2);
-        bundles[0].setProcessValidity(TimeUtil.dateToOozieDate(startDate.toDate()),
-            TimeUtil.dateToOozieDate(endDate.toDate()));
-
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        feed.setFeedPathValue(latePath).insertLateFeedValue(new Frequency("minutes(8)"));
-        bundles[0].getDataSets().remove(bundles[0].getInputFeedFromBundle());
-        bundles[0].getDataSets().add(feed.toString());
-        bundles[0].setOutputFeedLocationData(baseTestDir + "/output" + MINUTE_DATE_PATTERN);
-        bundles[0].submitClusters(prism);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = true)
-    public void testRetryInProcessZeroAttemptUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            // lets create data now:
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-
-            //schedule process
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            waitTillCertainPercentageOfProcessHasStarted(clusterOC, bundleId, 25);
-
-
-            int defaultRetries = bundles[0].getProcessObject().getRetry().getAttempts();
-
-            retry.setAttempts((0));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            prism.getProcessHelper()
-                .update((bundles[0].getProcessData()), bundles[0].getProcessData());
-            String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId, defaultRetries);
-            checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = true)
-    public void testRetryInProcessLowerAttemptUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            //now wait till the process is over
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            for (int attempt = 0;
-                 attempt < 20 && !validateFailureRetries(clusterOC, bundleId, 1); ++attempt) {
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertTrue(validateFailureRetries(clusterOC, bundleId, 1),
-                "Failure Retry validation failed");
-
-
-            retry.setAttempts((retry.getAttempts() - 2));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-
-            if ((retry.getAttempts() - 2) > 0) {
-                Assert.assertTrue(prism.getProcessHelper()
-                    .update((bundles[0].getProcessData()), bundles[0].getProcessData())
-                    .getMessage().contains("updated successfully"),
-                    "process was not updated successfully");
-                String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                    bundles[0].getProcessName(), EntityType.PROCESS);
-
-                Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-                //now to validate all failed instances to check if they were retried or not.
-                validateRetry(clusterOC, bundleId, retry.getAttempts() - 2);
-                if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                    checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-                }
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInProcessLowerManageableAttemptUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            for (int i = 0; i < 10 && !validateFailureRetries(clusterOC, bundleId, 1); ++i) {
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertTrue(validateFailureRetries(clusterOC, bundleId, 1),
-                "Failure Retry validation failed");
-
-            retry.setAttempts((retry.getAttempts() - 1));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            Assert.assertTrue(prism.getProcessHelper()
-                    .update((bundles[0].getProcessData()), bundles[0].getProcessData())
-                    .getMessage().contains("updated successfully"),
-                "process was not updated successfully");
-            String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId, retry.getAttempts() - 1);
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInProcessLowerBoundaryAttemptUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            for (int attempt = 0;
-                 attempt < 10 && !validateFailureRetries(clusterOC, bundleId, 2); ++attempt) {
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertTrue(validateFailureRetries(clusterOC, bundleId, 2),
-                "Failure Retry validation failed");
-
-
-            retry.setAttempts((2));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            Assert.assertTrue(
-                prism.getProcessHelper()
-                    .update((bundles[0].getProcessData()), bundles[0].getProcessData())
-                    .getMessage().contains("updated successfully"),
-                "process was not updated successfully");
-            String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId, 2);
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInProcessUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            waitTillCertainPercentageOfProcessHasStarted(clusterOC, bundleId, 25);
-
-            retry.setAttempts((4));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            Assert.assertTrue(prism.getProcessHelper()
-                .update(bundles[0].getProcessName(),
-                    null).getMessage()
-                .contains("updated successfully"), "process was not updated successfully");
-            String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId, 4);
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInProcessHigherDelayUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            waitTillCertainPercentageOfProcessHasStarted(clusterOC, bundleId, 25);
-
-            retry.setDelay(new Frequency("minutes(" + (retry.getDelay().getFrequency() + 1) + ")"));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            Assert.assertTrue(
-                prism.getProcessHelper().update(bundles[0].getProcessName(),
-                    bundles[0].getProcessData()).getMessage()
-                    .contains("updated successfully"), "process was not updated successfully");
-            String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInProcessLowerDelayUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            waitTillCertainPercentageOfProcessHasStarted(clusterOC, bundleId, 25);
-
-            retry.setDelay(new Frequency(
-                "minutes(" + (Integer.parseInt(retry.getDelay().getFrequency()) - 1) + ")"));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            Assert.assertTrue(prism.getProcessHelper()
-                    .update(bundles[0].getProcessName(),
-                        bundles[0].getProcessData()).getMessage()
-                    .contains("updated successfully"),
-                "process was not updated successfully");
-            String newBundleId = OozieUtil
-                .getLatestBundleID(clusterOC, bundles[0].getProcessName(),
-                    EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInProcessZeroDelayUpdate(Retry retry) throws Exception {
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        bundles[0].setRetry(retry);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            waitTillCertainPercentageOfProcessHasStarted(clusterOC, bundleId, 25);
-
-            retry.setDelay(new Frequency("minutes(0)"));
-
-            bundles[0].setRetry(retry);
-
-            LOGGER.info("going to update process at:" + DateTime.now(DateTimeZone.UTC));
-            Assert.assertFalse(
-                prism.getProcessHelper().update(bundles[0].getProcessName()
-                    , bundles[0].getProcessData()).getMessage().contains("updated successfully"),
-                "process was updated successfully!!!");
-            String newBundleId = OozieUtil.getLatestBundleID(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS);
-
-            Assert.assertEquals(bundleId, newBundleId, "its creating a new bundle!!!");
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInSimpleFailureCase(Retry retry) throws Exception {
-
-        bundles[0].setRetry(retry);
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        bundles[0].setProcessLatePolicy(null);
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testUserRetryWhileAutomaticRetriesHappen(Retry retry) throws Exception {
-
-        DateTimeFormatter timeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd/hh:mm");
-
-        bundles[0].setRetry(retry);
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        LOGGER.info("process dates: " + startDate + "," + endDate);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            for (int attempt = 0;
-                 attempt < 10 && !validateFailureRetries(clusterOC, bundleId, 1); ++attempt) {
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertTrue(validateFailureRetries(clusterOC, bundleId, 1),
-                "Failure Retry validation failed");
-
-            //now start firing random retries
-            LOGGER.info("now firing user reruns:");
-            for (int i = 0; i < 1; i++) {
-                prism.getProcessHelper()
-                    .getProcessInstanceRerun(bundles[0].getProcessName(),
-                        "?start=" + timeFormatter.print(startDate).replace("/", "T") + "Z"
-                            + "&end=" + timeFormatter.print(endDate).replace("/", "T") + "Z");
-            }
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testUserRetryAfterAutomaticRetriesHappen(Retry retry) throws Exception {
-
-        DateTimeFormatter timeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd/hh:mm");
-
-        bundles[0].setRetry(retry);
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        LOGGER.info("process dates: " + startDate + "," + endDate);
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(),
-                EntityType.PROCESS).get(0);
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-
-            LOGGER.info("now firing user reruns:");
-
-            DateTime[] dateBoundaries = getFailureTimeBoundaries(clusterOC, bundleId);
-            InstancesResult piResult = prism.getProcessHelper()
-                .getProcessInstanceRerun(bundles[0].getProcessName(),
-                    "?start=" + timeFormatter.print(dateBoundaries[0]).replace("/", "T") + "Z"
-                        + "&end=" + timeFormatter.print(dateBoundaries[dateBoundaries.length - 1])
-                         .replace("/", "T") + "Z");
-
-            AssertUtil.assertSucceeded(piResult);
-
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts() + 1);
-
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInSuspendedAndResumeCaseWithLateData(Retry retry) throws Exception {
-
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        feed.setFeedPathValue(latePath);
-        feed.insertLateFeedValue(new Frequency("minutes(10)"));
-        bundles[0].getDataSets().remove(bundles[0].getInputFeedFromBundle());
-        bundles[0].getDataSets().add(feed.toString());
-        bundles[0].setRetry(retry);
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-            List<DateTime> dates = null;
-
-            for (int i = 0; i < 10 && dates == null; ++i) {
-                dates = OozieUtil.getStartTimeForRunningCoordinators(cluster, bundleId);
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertNotNull(dates, String
-                .format("Start time for running coordinators of bundle: %s should not be null.",
-                    bundleId));
-            LOGGER.info("Start time: " + formatter.print(startDate));
-            LOGGER.info("End time: " + formatter.print(endDate));
-            LOGGER.info("candidate nominal time:" + formatter.print(dates.get(0)));
-
-            for (int attempt = 0;
-                 attempt < 10 && !validateFailureRetries(clusterOC, bundleId, 1); ++attempt) {
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertTrue(validateFailureRetries(clusterOC, bundleId, 1),
-                "Failure Retry validation failed");
-
-            LOGGER.info("now suspending the process altogether....");
-
-            AssertUtil.assertSucceeded(
-                cluster.getProcessHelper().suspend(bundles[0].getProcessData()));
-
-            HashMap<String, Integer> initialMap = getFailureRetriesForEachWorkflow(
-                clusterOC, getDefaultOozieCoordinator(clusterOC, bundleId));
-            LOGGER.info("saved state of workflow retries");
-
-            for (String key : initialMap.keySet()) {
-                LOGGER.info(key + "," + initialMap.get(key));
-            }
-
-            TimeUnit.MINUTES.sleep(10);
-
-
-            HashMap<String, Integer> finalMap = getFailureRetriesForEachWorkflow(
-                clusterOC, getDefaultOozieCoordinator(clusterOC, bundleId));
-            LOGGER.info("final state of process looks like:");
-
-            for (String key : finalMap.keySet()) {
-                LOGGER.info(key + "," + finalMap.get(key));
-            }
-
-            Assert.assertEquals(initialMap.size(), finalMap.size(),
-                "a new workflow retried while process was suspended!!!!");
-
-            for (String key : initialMap.keySet()) {
-                Assert.assertEquals(initialMap.get(key), finalMap.get(key),
-                    "values are different for workflow: " + key);
-            }
-
-            LOGGER.info("now resuming the process...");
-            AssertUtil.assertSucceeded(
-                cluster.getProcessHelper().resume(bundles[0].getProcessData()));
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInLateDataCase(Retry retry) throws Exception {
-
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        feed.setFeedPathValue(latePath);
-
-        feed.insertLateFeedValue(getFrequency(retry));
-
-        bundles[0].getDataSets().remove(bundles[0].getInputFeedFromBundle());
-        bundles[0].getDataSets().add(feed.toString());
-
-        bundles[0].setRetry(retry);
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            List<String> initialData =
-                Util.getHadoopDataFromDir(clusterFS, bundles[0].getInputFeedFromBundle(),
-                    lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-            List<DateTime> dates = null;
-
-            for (int i = 0; i < 10 && dates == null; ++i) {
-                dates = OozieUtil.getStartTimeForRunningCoordinators(cluster, bundleId);
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertNotNull(dates, String
-                .format("Start time for running coordinators of bundle: %s should not be null.",
-                    bundleId));
-
-            LOGGER.info("Start time: " + formatter.print(startDate));
-            LOGGER.info("End time: " + formatter.print(endDate));
-            LOGGER.info("candidate nominal time:" + formatter.print(dates.get(0)));
-            DateTime now = dates.get(0);
-
-            if (formatter.print(startDate).compareToIgnoreCase(formatter.print(dates.get(0))) > 0) {
-                now = startDate;
-            }
-
-            //now wait till the process is over
-            for (int attempt = 0; attempt < 10 && !validateFailureRetries(
-                clusterOC, bundleId, bundles[0].getProcessObject().getRetry().getAttempts());
-                 ++attempt) {
-                TimeUtil.sleepSeconds(10);
-            }
-            Assert.assertTrue(
-                validateFailureRetries(clusterOC, bundleId,
-                    bundles[0].getProcessObject().getRetry().getAttempts()),
-                "Failure Retry validation failed");
-
-            String insertionFolder =
-                Util.findFolderBetweenGivenTimeStamps(now, now.plusMinutes(5), initialData);
-            LOGGER.info("inserting data in folder " + insertionFolder + " at " + DateTime.now());
-            HadoopUtil.injectMoreData(clusterFS, lateDir + insertionFolder,
-                    OSUtil.concat(OSUtil.OOZIE_EXAMPLE_INPUT_DATA, "lateData"));
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                bundles[0].getProcessObject().getRetry().getAttempts());
-
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    @Test(dataProvider = "DP", groups = {"0.2.2", "retry"}, enabled = false)
-    public void testRetryInDeleteAfterPartialRetryCase(Retry retry) throws Exception {
-
-        FeedMerlin feed = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        feed.setFeedPathValue(latePath);
-        feed.insertLateFeedValue(new Frequency("minutes(1)"));
-        bundles[0].getDataSets().remove(bundles[0].getInputFeedFromBundle());
-        bundles[0].getDataSets().add(feed.toString());
-
-        bundles[0].setRetry(retry);
-
-        for (String data : bundles[0].getDataSets()) {
-            AssertUtil.assertSucceeded(prism.getFeedHelper().submitEntity(data));
-        }
-
-
-        //submit and schedule process
-        ServiceResponse response =
-            prism.getProcessHelper().submitEntity(bundles[0].getProcessData());
-        if (retry.getAttempts() <= 0 || retry.getDelay().getFrequencyAsInt() <= 0) {
-            AssertUtil.assertFailed(response);
-        } else {
-            AssertUtil.assertSucceeded(response);
-            HadoopUtil.deleteDirIfExists(lateDir, clusterFS);
-            HadoopUtil.lateDataReplenish(clusterFS, 20, 0, lateDir);
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().schedule(bundles[0].getProcessData()));
-            //now wait till the process is over
-            String bundleId = OozieUtil.getBundles(clusterOC,
-                bundles[0].getProcessName(), EntityType.PROCESS).get(0);
-
-            validateRetry(clusterOC, bundleId,
-                (bundles[0].getProcessObject().getRetry().getAttempts()) / 2);
-
-            AssertUtil.assertSucceeded(
-                prism.getProcessHelper().delete((bundles[0].getProcessData())));
-
-            if (retry.getPolicy() == PolicyType.EXP_BACKOFF) {
-                TimeUnit.MINUTES.sleep(retry.getDelay().getFrequencyAsInt() * ((retry.getAttempts()
-                    - (bundles[0].getProcessObject().getRetry().getAttempts()) / 2) ^ 2));
-            } else {
-                TimeUnit.MINUTES
-                    .sleep(retry.getDelay().getFrequencyAsInt()
-                        * ((bundles[0].getProcessObject().getRetry().getAttempts())
-                        - (bundles[0].getProcessObject().getRetry().getAttempts()) / 2));
-            }
-
-            //now to validate all failed instances to check if they were retried or not.
-            validateRetry(clusterOC, bundleId,
-                (bundles[0].getProcessObject().getRetry().getAttempts()) / 2);
-
-            if (bundles[0].getProcessObject().getRetry().getAttempts() > 0) {
-                checkIfRetriesWereTriggeredCorrectly(cluster, retry, bundleId);
-            }
-        }
-    }
-
-
-    private void validateRetry(OozieClient oozieClient, String bundleId, int maxNumberOfRetries)
-        throws Exception {
-        //validate that all failed processes were retried the specified number of times.
-        for (int i = 0; i < 60 && getDefaultOozieCoordinator(oozieClient, bundleId) == null; ++i) {
-            TimeUtil.sleepSeconds(10);
-        }
-        final CoordinatorJob defaultCoordinator = getDefaultOozieCoordinator(oozieClient, bundleId);
-        Assert.assertNotNull(defaultCoordinator, "Unexpected value of defaultCoordinator");
-
-        for (int i = 0;
-             i < 60 && !validateFailureRetries(oozieClient, bundleId, maxNumberOfRetries); ++i) {
-            LOGGER.info("desired state not reached, attempt number: " + i);
-            TimeUtil.sleepSeconds(10);
-        }
-        Assert.assertTrue(validateFailureRetries(oozieClient, bundleId, maxNumberOfRetries),
-            "all retries were not attempted correctly!");
-    }
-
-
-    private boolean validateFailureRetries(OozieClient oozieClient, String bundleId,
-                                           int maxNumberOfRetries) throws Exception {
-        final CoordinatorJob coordinator = getDefaultOozieCoordinator(clusterOC, bundleId);
-        if (maxNumberOfRetries < 0) {
-            maxNumberOfRetries = 0;
-        }
-        LOGGER.info("coordinator: " + coordinator);
-        HashMap<String, Boolean> workflowMap = new HashMap<>();
-
-        if (coordinator == null || coordinator.getActions().size() == 0) {
-            return false;
-        }
-        LOGGER.info("coordinator.getActions(): " + coordinator.getActions());
-        for (CoordinatorAction action : coordinator.getActions()) {
-
-            if (null == action.getExternalId()) {
-                return false;
-            }
-
-
-            WorkflowJob actionInfo = oozieClient.getJobInfo(action.getExternalId());
-            LOGGER
-                .info("actionInfo: " + actionInfo + " actionInfo.getRun(): " + actionInfo.getRun());
-
-
-            if (!(actionInfo.getStatus() == WorkflowJob.Status.SUCCEEDED
-                || actionInfo.getStatus() == WorkflowJob.Status.RUNNING)) {
-                if (actionInfo.getRun() == maxNumberOfRetries) {
-                    workflowMap.put(actionInfo.getId(), true);
-                } else {
-                    Assert.assertTrue(actionInfo.getRun() < maxNumberOfRetries,
-                        "The workflow exceeded the max number of retries specified for it!!!!");
-                    workflowMap.put(actionInfo.getId(), false);
-                }
-
-            } else if (actionInfo.getStatus() == WorkflowJob.Status.SUCCEEDED) {
-                workflowMap.put(actionInfo.getId(), true);
-            }
-        }
-
-        //first make sure that the map has all the entries for the coordinator:
-        if (workflowMap.size() != coordinator.getActions().size()) {
-            return false;
-        } else {
-            boolean result = true;
-
-            for (String key : workflowMap.keySet()) {
-                result &= workflowMap.get(key);
-            }
-
-            return result;
-        }
-    }
-
-    public CoordinatorJob getDefaultOozieCoordinator(OozieClient oozieClient, String bundleId)
-        throws Exception {
-        BundleJob bundlejob = oozieClient.getBundleJobInfo(bundleId);
-
-        for (CoordinatorJob coord : bundlejob.getCoordinators()) {
-            if (coord.getAppName().contains("DEFAULT")) {
-                return oozieClient.getCoordJobInfo(coord.getId());
-            }
-        }
-        return null;
-    }
-
-    @DataProvider(name = "DP")
-    public Object[][] getData() {
-
-        String[] retryTypes = new String[]{"periodic", "exp-backoff"}; //,"exp-backoff"
-        Integer[] delays = new Integer[]{2, 0}; //removing -1 since this should be checked at
-                                                // validation level while setting
-        String[] delayUnits = new String[]{"minutes"};
-        Integer[] retryAttempts = new Integer[]{2, 0, 3}; //0,-1,2
-
-        Object[][] crossProd = MatrixUtil
-            .crossProduct(delays, delayUnits, retryTypes, retryAttempts);
-        Object[][] testData = new Object[crossProd.length][1];
-        for (int i = 0; i < crossProd.length; ++i) {
-            final Integer delay = (Integer) crossProd[i][0];
-            final String delayUnit = (String) crossProd[i][1];
-            final String retryType = (String) crossProd[i][2];
-            final Integer retry = (Integer) crossProd[i][3];
-            testData[i][0] = getRetry(delay, delayUnit, retryType, retry);
-        }
-        return testData;
-    }
-
-    private void waitTillCertainPercentageOfProcessHasStarted(OozieClient oozieClient,
-                                                              String bundleId, int percentage)
-        throws Exception {
-        OozieUtil.waitForCoordinatorJobCreation(oozieClient, bundleId);
-        CoordinatorJob defaultCoordinator = getDefaultOozieCoordinator(oozieClient, bundleId);
-
-        // make sure default coordinator is not null before we proceed
-        for (int i = 0; i < 120 && (defaultCoordinator == null || defaultCoordinator.getStatus()
-            == CoordinatorJob.Status.PREP); ++i) {
-            TimeUtil.sleepSeconds(10);
-            defaultCoordinator = getDefaultOozieCoordinator(oozieClient, bundleId);
-        }
-        Assert.assertNotNull(defaultCoordinator, "default coordinator is null");
-        Assert.assertNotEquals(defaultCoordinator.getStatus(), CoordinatorJob.Status.PREP,
-            "Unexpected state for coordinator job: " + defaultCoordinator.getId());
-        int totalCount = defaultCoordinator.getActions().size();
-
-        int percentageConversion = (percentage * totalCount) / 100;
-
-        while (percentageConversion > 0) {
-            int doneBynow = 0;
-            for (CoordinatorAction action : defaultCoordinator.getActions()) {
-                CoordinatorAction actionInfo = oozieClient.getCoordActionInfo(action.getId());
-                if (actionInfo.getStatus() == CoordinatorAction.Status.RUNNING) {
-                    doneBynow++;
-                }
-            }
-            if (doneBynow >= percentageConversion) {
-                break;
-            }
-            TimeUtil.sleepSeconds(10);
-        }
-    }
-
-
-    private HashMap<String, Integer> getFailureRetriesForEachWorkflow(OozieClient oozieClient,
-                                                                      CoordinatorJob coordinator)
-        throws Exception {
-        HashMap<String, Integer> workflowRetryMap = new HashMap<>();
-        for (CoordinatorAction action : coordinator.getActions()) {
-
-            if (null == action.getExternalId()) {
-                continue;
-            }
-
-            WorkflowJob actionInfo = oozieClient.getJobInfo(action.getExternalId());
-            LOGGER.info("adding workflow " + actionInfo.getId() + " to the map");
-            workflowRetryMap.put(actionInfo.getId(), actionInfo.getRun());
-        }
-        return workflowRetryMap;
-    }
-
-    private DateTime[] getFailureTimeBoundaries(OozieClient oozieClient, String bundleId)
-        throws Exception {
-        List<DateTime> dateList = new ArrayList<>();
-
-        CoordinatorJob coordinator = getDefaultOozieCoordinator(oozieClient, bundleId);
-
-        for (CoordinatorAction action : coordinator.getActions()) {
-            if (action.getExternalId() != null) {
-
-                WorkflowJob jobInfo = oozieClient.getJobInfo(action.getExternalId());
-                if (jobInfo.getRun() > 0) {
-                    dateList.add(new DateTime(jobInfo.getStartTime(), DateTimeZone.UTC));
-                }
-            }
-        }
-        Collections.sort(dateList);
-        return dateList.toArray(new DateTime[dateList.size()]);
-    }
-
-    private void checkIfRetriesWereTriggeredCorrectly(ColoHelper coloHelper, Retry retry,
-                                                      String bundleId) throws Exception {
-        //it is presumed that this delay here will be expressed in minutes. Hourly/daily is
-        // unfeasible to check :)
-
-        final DateTimeFormatter timeFormatter = DateTimeFormat.forPattern("HH:mm:ss");
-
-        final OozieClient oozieClient = coloHelper.getFeedHelper().getOozieClient();
-        final CoordinatorJob coordinator = getDefaultOozieCoordinator(oozieClient, bundleId);
-
-        for (CoordinatorAction action : coordinator.getActions()) {
-            CoordinatorAction coordAction = oozieClient.getCoordActionInfo(action.getExternalId());
-            if (!(coordAction.getStatus() == CoordinatorAction.Status.SUCCEEDED)) {
-                int expectedDelay = retry.getDelay().getFrequencyAsInt();
-                //first get data from logs:
-                List<String> instanceRetryTimes =
-                    Util.getInstanceRetryTimes(coloHelper, action.getExternalId());
-                List<String> instanceFinishTimes =
-                    Util.getInstanceFinishTimes(coloHelper, action.getExternalId());
-
-                LOGGER.info("finish times look like:");
-                for (String line : instanceFinishTimes) {
-                    LOGGER.info(line);
-                }
-
-                LOGGER.info("retry times look like:");
-                for (String line : instanceRetryTimes) {
-                    LOGGER.info(line);
-                }
-
-                LOGGER.info("checking timelines for retry type " + retry.getPolicy().value()
-                    + " for delay " + expectedDelay + " for workflow id: " + action.getExternalId());
-
-                if (retry.getPolicy() == PolicyType.PERIODIC) {
-                    //in this case the delay unit will always be a constant time diff
-                    for (int i = 0; i < instanceFinishTimes.size() - 1; i++) {
-                        DateTime temp = timeFormatter.parseDateTime(instanceFinishTimes.get(i));
-
-                        Assert.assertEquals(temp.plusMinutes(expectedDelay).getMillis(),
-                            timeFormatter.parseDateTime(instanceRetryTimes.get(i)).getMillis(),
-                            5000, "oops! this is out of expected delay range for workflow id  "
-                                + action.getExternalId());
-                    }
-                } else {
-                    //check for exponential
-                    for (int i = 0; i < instanceFinishTimes.size() - 1; i++) {
-                        DateTime temp = timeFormatter.parseDateTime(instanceFinishTimes.get(i));
-                        Assert.assertEquals(temp.plusMinutes(expectedDelay).getMillis(),
-                            timeFormatter.parseDateTime(instanceRetryTimes.get(i)).getMillis(),
-                            5000, "oops! this is out of expected delay range for workflow id "
-                                + action.getExternalId());
-                        expectedDelay *= 2;
-                    }
-                }
-            }
-        }
-
-    }
-
-    private Retry getRetry(int delay, String delayUnits, String retryType,
-                           int retryAttempts) {
-        Retry retry = new Retry() {
-            @Override
-            public String toString(){
-                return String.format("Frequency: %s; Attempts: %s; PolicyType: %s",
-                    this.getDelay(), this.getAttempts(), this.getPolicy());
-            }
-        };
-        retry.setAttempts(retryAttempts);
-        retry.setDelay(new Frequency(delayUnits + "(" + delay + ")"));
-        retry.setPolicy(PolicyType.fromValue(retryType));
-        return retry;
-    }
-
-    private Frequency getFrequency(Retry retry) {
-        int delay = retry.getDelay().getFrequencyAsInt();
-        if (delay == 0) {
-            delay = 1;
-        }
-        int attempts = retry.getAttempts();
-        if (attempts == 0) {
-            attempts = 1;
-        }
-
-        if (retry.getPolicy() == PolicyType.EXP_BACKOFF) {
-            // note: '^' is bitwise XOR in Java, so use a shift (or Math.pow) for 2^attempts
-            delay = Math.abs(delay) * (1 << Math.abs(attempts));
-        } else {
-            delay = Math.abs(delay * attempts);
-        }
-
-        return new Frequency(retry.getDelay().getTimeUnit() + "(" + delay + ")");
-
-    }
-}
-
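The getFrequency() helper above computes the total expected retry delay: base delay times 2^attempts for exponential backoff, base delay times attempts for a periodic policy. A minimal, self-contained sketch of that arithmetic; the class and method names below are illustrative and not part of the Falcon API, and note that Java's '^' operator is bitwise XOR, so a shift or Math.pow is needed for the power of two.

    // Sketch of the retry-delay arithmetic used by getFrequency() above.
    // Names are illustrative; only the math mirrors the helper.
    public final class RetryDelayMath {

        private RetryDelayMath() {
        }

        /** Total delay for an exponential-backoff policy: baseDelay * 2^attempts. */
        public static int exponentialTotal(int baseDelay, int attempts) {
            // '<<' computes the power of two; '^' would be bitwise XOR in Java.
            return Math.abs(baseDelay) * (1 << Math.abs(attempts));
        }

        /** Total delay for a periodic (constant-delay) policy: baseDelay * attempts. */
        public static int periodicTotal(int baseDelay, int attempts) {
            return Math.abs(baseDelay * attempts);
        }

        public static void main(String[] args) {
            System.out.println(exponentialTotal(2, 3)); // 16
            System.out.println(periodicTotal(2, 3));    // 6
        }
    }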

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NoOutputProcessTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NoOutputProcessTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NoOutputProcessTest.java
deleted file mode 100644
index 0711e8a..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/NoOutputProcessTest.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.supportClasses.JmsMessageConsumer;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-
-/**
- * Null output process tests.
- */
-@Test(groups = "embedded")
-public class NoOutputProcessTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String testDir = cleanAndGetTestDir();
-    private String inputPath = testDir + "/input" + MINUTE_DATE_PATTERN;
-    private String workflowForNoIpOp = cleanAndGetTestDir();
-    private static final Logger LOGGER = Logger.getLogger(NoOutputProcessTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        uploadDirToClusters(workflowForNoIpOp, OSUtil.concat(OSUtil.RESOURCES, "workflows", "aggregatorNoOutput"));
-        Bundle b = BundleUtil.readELBundle();
-        b.generateUniqueBundle(this);
-        b = new Bundle(b, cluster);
-        String startDate = "2010-01-03T00:00Z";
-        String endDate = "2010-01-03T03:00Z";
-        b.setInputFeedDataPath(inputPath);
-        String prefix = b.getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 20);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT, prefix, dataDates);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0].generateUniqueBundle(this);
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].setProcessWorkflow(workflowForNoIpOp);
-        bundles[0].setInputFeedDataPath(inputPath);
-        bundles[0].setProcessValidity("2010-01-03T02:30Z", "2010-01-03T02:45Z");
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        ProcessMerlin process = bundles[0].getProcessObject();
-        process.setOutputs(null);
-        process.setLateProcess(null);
-        bundles[0].submitFeedsScheduleProcess(prism);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Waits till the process ends successfully. Checks that JMS messages related to entities
-     * reflect info about the succeeded process instances as expected.
-     * @throws Exception
-     */
-    @Test(enabled = true, groups = {"singleCluster"})
-    public void checkForJMSMsgWhenNoOutput() throws Exception {
-        LOGGER.info("attaching messageConsumer to:   " + "FALCON.ENTITY.TOPIC");
-        JmsMessageConsumer messageConsumer =
-            new JmsMessageConsumer("FALCON.ENTITY.TOPIC", cluster.getClusterHelper().getActiveMQ());
-        messageConsumer.start();
-
-        //wait for all the instances to complete
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-        messageConsumer.interrupt();
-        Util.printMessageData(messageConsumer);
-        Assert.assertEquals(messageConsumer.getReceivedMessages().size(), 3,
-            " Message for all the 3 instance not found");
-    }
-
-    /**
-     * Waits till the process ends successfully. Checks that JMS messages related to entities
-     * and to the particular process reflect info about the succeeded process instances as expected.
-     * @throws Exception
-     */
-    @Test(enabled = true, groups = {"singleCluster"})
-    public void rm() throws Exception {
-        JmsMessageConsumer consumerEntityMsg =
-            new JmsMessageConsumer("FALCON.ENTITY.TOPIC", cluster.getClusterHelper().getActiveMQ());
-        JmsMessageConsumer consumerProcessMsg =
-            new JmsMessageConsumer("FALCON." + bundles[0].getProcessName(),
-                cluster.getClusterHelper().getActiveMQ());
-        consumerEntityMsg.start();
-        consumerProcessMsg.start();
-
-        //wait for all the instances to complete
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 3,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-        consumerEntityMsg.interrupt();
-        consumerProcessMsg.interrupt();
-        Util.printMessageData(consumerEntityMsg);
-        Util.printMessageData(consumerProcessMsg);
-        Assert.assertEquals(consumerEntityMsg.getReceivedMessages().size(), 3,
-            " Message for all the 3 instance not found");
-        Assert.assertEquals(consumerProcessMsg.getReceivedMessages().size(), 3,
-            " Message for all the 3 instance not found");
-    }
-}
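Both tests above assert on the JMS messages that Falcon post-processing publishes to FALCON.ENTITY.TOPIC (and to a per-process topic), consumed through the JmsMessageConsumer test helper. A rough sketch of the same subscription using the plain JMS/ActiveMQ client API; the broker URL below is a placeholder, not a value from the test setup.

    import javax.jms.Connection;
    import javax.jms.Message;
    import javax.jms.MessageConsumer;
    import javax.jms.Session;
    import javax.jms.Topic;
    import org.apache.activemq.ActiveMQConnectionFactory;

    // Sketch: subscribe to the entity topic and wait for one post-processing message.
    public final class EntityTopicListener {

        private EntityTopicListener() {
        }

        public static void main(String[] args) throws Exception {
            ActiveMQConnectionFactory factory =
                    new ActiveMQConnectionFactory("tcp://localhost:61616"); // placeholder broker
            Connection connection = factory.createConnection();
            connection.start();
            try {
                Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
                Topic topic = session.createTopic("FALCON.ENTITY.TOPIC");
                MessageConsumer consumer = session.createConsumer(topic);
                // Block for up to 30 seconds waiting for a single message.
                Message message = consumer.receive(30_000);
                System.out.println(message);
            } finally {
                connection.close();
            }
        }
    }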

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessFrequencyTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessFrequencyTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessFrequencyTest.java
deleted file mode 100644
index b0480e9..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessFrequencyTest.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.FreqType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-
-/**
- * Test process with different frequency combinations.
- */
-@Test(groups = "embedded")
-public class ProcessFrequencyTest extends BaseTestClass {
-    private static final Logger LOGGER = Logger.getLogger(ProcessFrequencyTest.class);
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Test Process submission with different frequencies. Expecting the process workflow to run
-     * successfully.
-     * @throws Exception
-     */
-    @Test(dataProvider = "generateProcessFrequencies")
-    public void testProcessWithFrequency(final FreqType freqType, final int freqAmount)
-        throws Exception {
-        final String startDate = "2010-01-02T01:00Z";
-        final String endDate = "2010-01-02T01:01Z";
-        final String inputPath = baseTestHDFSDir + "/input/";
-        bundles[0].setInputFeedDataPath(inputPath + freqType.getPathValue());
-        bundles[0].setOutputFeedLocationData(
-            baseTestHDFSDir + "/output-data/" + freqType.getPathValue());
-        bundles[0].setProcessPeriodicity(freqAmount, freqType.getFalconTimeUnit());
-        bundles[0].setProcessInputStartEnd("now(0,0)", "now(0,0)");
-        bundles[0].setProcessValidity(startDate, endDate);
-        HadoopUtil.deleteDirIfExists(inputPath, clusterFS);
-        bundles[0].submitFeedsScheduleProcess(prism);
-
-        //upload data
-        final String startPath = inputPath + freqType.getFormatter().print(
-            TimeUtil.oozieDateToDate(startDate));
-        HadoopUtil.copyDataToFolder(clusterFS, startPath, OSUtil.NORMAL_INPUT);
-
-        final String processName = bundles[0].getProcessName();
-        //InstanceUtil.waitTillInstancesAreCreated(cluster, bundles[0].getProcessData(), 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getRunningInstance(processName);
-        InstanceUtil.validateSuccessWOInstances(r);
-    }
-
-    @DataProvider(name = "generateProcessFrequencies")
-    public Object[][] generateProcessFrequencies() {
-        return new Object[][] {
-            {FreqType.MINUTELY, 2, },
-            {FreqType.HOURLY, 3, },
-            {FreqType.DAILY, 5, },
-            {FreqType.MONTHLY, 7, },
-        };
-    }
-
-    /**
-     * Test Process submission with bad frequency. Expecting submissions to fail.
-     * @throws Exception
-     */
-    @Test
-    public void testProcessWithBadFrequency()
-        throws Exception {
-        final String startDate = "2010-01-02T01:00Z";
-        final String endDate = "2010-01-02T01:01Z";
-        final String inputPath = baseTestHDFSDir + "/input/";
-        final FreqType freqType = FreqType.MINUTELY;
-        bundles[0].setInputFeedDataPath(inputPath + freqType.getPathValue());
-        bundles[0].setOutputFeedLocationData(
-            baseTestHDFSDir + "/output-data/" + freqType.getPathValue());
-        bundles[0].submitClusters(prism);
-        bundles[0].submitFeeds(prism);
-
-        bundles[0].setProcessInputStartEnd("now(0,0)", "now(0,0)");
-        bundles[0].setProcessValidity(startDate, endDate);
-        final ProcessMerlin processMerlin = bundles[0].getProcessObject();
-        //a frequency can be bad in two ways - it can have bad amount or it can have bad unit
-        //submit process with bad amount
-        processMerlin.setFrequency(new Frequency("BadAmount", freqType.getFalconTimeUnit()));
-        AssertUtil.assertFailed(prism.getProcessHelper().submitEntity(processMerlin.toString()));
-
-        //submit process with bad unit
-        processMerlin.setFrequency(new Frequency("2993", freqType.getFalconTimeUnit()));
-        final String process = processMerlin.toString();
-        final String newProcess = process.replaceAll("minutes\\(2993\\)", "BadUnit(2993)");
-        AssertUtil.assertFailed(prism.getProcessHelper().submitEntity(newProcess));
-    }
-
-}
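testProcessWithBadFrequency() above exercises the unit(amount) frequency syntax, e.g. minutes(2993), by corrupting either the amount or the unit. A small sketch of that format check; the regex below is illustrative and not Falcon's actual Frequency parser.

    import java.util.regex.Pattern;

    // Sketch: validate a "unit(amount)" frequency string as used in the test above.
    public final class FrequencyFormat {

        private static final Pattern FREQUENCY =
                Pattern.compile("(minutes|hours|days|months)\\((\\d+)\\)");

        private FrequencyFormat() {
        }

        public static boolean isValid(String frequency) {
            return FREQUENCY.matcher(frequency).matches();
        }

        public static void main(String[] args) {
            System.out.println(isValid("minutes(2993)"));      // true
            System.out.println(isValid("minutes(BadAmount)")); // false
            System.out.println(isValid("BadUnit(2993)"));      // false
        }
    }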

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceColoMixedTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceColoMixedTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceColoMixedTest.java
deleted file mode 100644
index 91d39a7..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceColoMixedTest.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction.Status;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * Process instance mixed colo tests.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceColoMixedTest extends BaseTestClass {
-
-    private final String baseTestHDFSDir = cleanAndGetTestDir();
-    private final String feedPath = baseTestHDFSDir + "/feed0%d" + MINUTE_DATE_PATTERN;
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private ColoHelper cluster1 = servers.get(0);
-    private ColoHelper cluster2 = servers.get(1);
-    private FileSystem cluster1FS = serverFS.get(0);
-    private FileSystem cluster2FS = serverFS.get(1);
-    private static final Logger LOGGER = Logger.getLogger(ProcessInstanceColoMixedTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void prepareClusters() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(cluster1FS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-        HadoopUtil.uploadDir(cluster2FS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        //generate bundles according to config files
-        bundles[0] = new Bundle(BundleUtil.readELBundle(), cluster1);
-        bundles[1] = new Bundle(BundleUtil.readELBundle(), cluster2);
-        bundles[0].generateUniqueBundle(this);
-        bundles[1].generateUniqueBundle(this);
-
-        //set cluster colos
-        bundles[0].setCLusterColo(cluster1.getClusterHelper().getColoName());
-        LOGGER.info("cluster b1: " + Util.prettyPrintXml(bundles[0].getClusters().get(0)));
-        bundles[1].setCLusterColo(cluster2.getClusterHelper().getColoName());
-        LOGGER.info("cluster b2: " + Util.prettyPrintXml(bundles[1].getClusters().get(0)));
-
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[1].setProcessWorkflow(aggregateWorkflowDir);
-        //submit 2 clusters
-        Bundle.submitCluster(bundles[0], bundles[1]);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @Test(timeOut = 12000000)
-    public void mixed01C1sC2sC1eC2e() throws Exception {
-        //ua1 and ua3 are source colos, ua2 is the target; feed01 on ua1, feed02 on ua3
-        //get 2 unique feeds
-        FeedMerlin feed01 = new FeedMerlin(bundles[0].getInputFeedFromBundle());
-        FeedMerlin feed02 = new FeedMerlin(bundles[1].getInputFeedFromBundle());
-        FeedMerlin outputFeed = new FeedMerlin(bundles[0].getOutputFeedFromBundle());
-        //set source and target for the 2 feeds
-
-        //set clusters to null;
-        feed01.clearFeedClusters();
-        feed02.clearFeedClusters();
-        outputFeed.clearFeedClusters();
-
-        //set new feed input data
-        feed01.setFeedPathValue(String.format(feedPath, 1));
-        feed02.setFeedPathValue(String.format(feedPath, 2));
-
-        //generate data in both the colos ua1 and ua3
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(
-            TimeUtil.getTimeWrtSystemTime(-35), TimeUtil.getTimeWrtSystemTime(25), 1);
-
-        String prefix = feed01.getFeedPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), cluster1FS);
-        HadoopUtil.flattenAndPutDataInFolder(cluster1FS, OSUtil.SINGLE_FILE, prefix, dataDates);
-
-        prefix = feed02.getFeedPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), cluster2FS);
-        HadoopUtil.flattenAndPutDataInFolder(cluster2FS, OSUtil.SINGLE_FILE, prefix, dataDates);
-
-        String startTime = TimeUtil.getTimeWrtSystemTime(-70);
-
-        //set clusters for feed01
-        feed01.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(10000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        feed01.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(10000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.TARGET)
-                .build());
-
-        //set clusters for feed02
-        feed02.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(10000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.TARGET)
-                .build());
-        feed02.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(10000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-
-        //set clusters for output feed
-        outputFeed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(10000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.SOURCE)
-                .build());
-        outputFeed.addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(10000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.TARGET)
-                .build());
-
-        //submit and schedule feeds
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed01.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(feed02.toString()));
-        AssertUtil.assertSucceeded(prism.getFeedHelper().submitAndSchedule(outputFeed.toString()));
-
-        String processStartTime = TimeUtil.getTimeWrtSystemTime(-16);
-        // String processEndTime = InstanceUtil.getTimeWrtSystemTime(20);
-
-        ProcessMerlin process = bundles[0].getProcessObject();
-        process.clearProcessCluster();
-        process.addProcessCluster(
-            new ProcessMerlin.ProcessClusterBuilder(
-                Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withValidity(processStartTime, TimeUtil.addMinsToTime(processStartTime, 35))
-                .build());
-        process.addProcessCluster(
-            new ProcessMerlin.ProcessClusterBuilder(
-                Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withValidity(TimeUtil.addMinsToTime(processStartTime, 16),
-                    TimeUtil.addMinsToTime(processStartTime, 45))
-                .build());
-        process.addInputFeed(feed02.getName(), feed02.getName());
-
-        //submit and schedule process
-        prism.getProcessHelper().submitAndSchedule(process.toString());
-
-        LOGGER.info("Wait till process goes into running ");
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(0), process.getName(), 1,
-            Status.RUNNING, EntityType.PROCESS);
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(1), process.getName(), 1,
-            Status.RUNNING, EntityType.PROCESS);
-
-        InstancesResult responseInstance = prism.getProcessHelper().getProcessInstanceStatus(process.getName(),
-                "?start=" + processStartTime + "&end=" + TimeUtil.addMinsToTime(processStartTime, 45));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-
-        responseInstance = prism.getProcessHelper().getProcessInstanceSuspend(process.getName(),
-            "?start=" + TimeUtil.addMinsToTime(processStartTime, 37)
-                + "&end=" + TimeUtil.addMinsToTime(processStartTime, 44));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-
-        responseInstance = prism.getProcessHelper().getProcessInstanceStatus(process.getName(),
-            "?start=" + TimeUtil.addMinsToTime(processStartTime, 37)
-                + "&end=" + TimeUtil.addMinsToTime(processStartTime, 44));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-
-        responseInstance = prism.getProcessHelper().getProcessInstanceResume(process.getName(),
-            "?start=" + processStartTime + "&end=" + TimeUtil.addMinsToTime(processStartTime, 7));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-
-        responseInstance = prism.getProcessHelper().getProcessInstanceStatus(process.getName(),
-            "?start=" + TimeUtil.addMinsToTime(processStartTime, 16)
-                + "&end=" + TimeUtil.addMinsToTime(processStartTime, 45));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-
-        responseInstance = cluster1.getProcessHelper().getProcessInstanceKill(process.getName(),
-            "?start=" + processStartTime + "&end="+ TimeUtil.addMinsToTime(processStartTime, 7));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-
-        responseInstance = prism.getProcessHelper().getProcessInstanceRerun(process.getName(),
-            "?start=" + processStartTime + "&end=" + TimeUtil.addMinsToTime(processStartTime, 7));
-        AssertUtil.assertSucceeded(responseInstance);
-        Assert.assertTrue(responseInstance.getInstances() != null);
-    }
-}
-
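The instance API calls above take a "?start=...&end=..." window built from timestamps like 2010-01-02T01:00Z shifted by some number of minutes. A minimal sketch of constructing such a window with java.time; the class name, helper name, and window size are illustrative, not part of the test framework.

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    // Sketch: build the start/end query parameters used by the instance API calls above.
    public final class InstanceWindow {

        private static final DateTimeFormatter FALCON_TIME =
                DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm'Z'");

        private InstanceWindow() {
        }

        public static String params(ZonedDateTime start, int windowMinutes) {
            return "?start=" + FALCON_TIME.format(start)
                    + "&end=" + FALCON_TIME.format(start.plusMinutes(windowMinutes));
        }

        public static void main(String[] args) {
            ZonedDateTime start = ZonedDateTime.of(2010, 1, 2, 1, 0, 0, 0, ZoneOffset.UTC);
            System.out.println(params(start, 45)); // ?start=2010-01-02T01:00Z&end=2010-01-02T01:45Z
        }
    }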


[35/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/FalconRadixUtils.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/FalconRadixUtils.java b/common/src/main/java/org/apache/falcon/util/FalconRadixUtils.java
deleted file mode 100644
index 35f03b9..0000000
--- a/common/src/main/java/org/apache/falcon/util/FalconRadixUtils.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.common.FeedDataPath;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Falcon specific utilities for the Radix Tree.
- */
-public class FalconRadixUtils {
-
-    /**
-     * This interface implements the various algorithms to compare node's key with input based on whether you want
-     * a regular expression based algorithm or a character by character matching algorithm.
-     */
-    public interface INodeAlgorithm {
-
-        /**
-         * Checks if the given key and input match.
-         * @param key key of the node
-         * @param input input String to be matched against key.
-         * @return true if key and input match.
-         */
-        boolean match(String key, String input);
-
-        boolean startsWith(String key, String input);
-
-        /**
-         * Finds next node to take for traversal among currentNode's children.
-         * @param currentNode of RadixTree which has been matched.
-         * @param input input String to be searched.
-         * @return Node to be traversed next.
-         */
-        RadixNode getNextCandidate(RadixNode currentNode, String input);
-
-        // for the given node and input key, finds the remainingText to be matched with child sub tree.
-        String getRemainingText(RadixNode currentNode, String key);
-    }
-
-    /**
-     * This Algorithm does a plain string comparison for all
-     * type of operations on a node.
-     */
-    static class StringAlgorithm implements INodeAlgorithm {
-
-        @Override
-        public boolean match(String key, String input) {
-            return StringUtils.equals(key, input);
-        }
-
-        @Override
-        public boolean startsWith(String nodeKey, String inputKey) {
-            return inputKey.startsWith(nodeKey);
-        }
-
-        @Override
-        public RadixNode getNextCandidate(RadixNode currentNode, String input) {
-            RadixNode newRoot = null;
-            String remainingText = input.substring(currentNode.getKey().length());
-            List<RadixNode> result = currentNode.getChildren();
-            for(RadixNode child : result){
-                if (child.getKey().charAt(0) == remainingText.charAt(0)){
-                    newRoot = child;
-                    break;
-                }
-            }
-            return newRoot;
-        }
-
-        @Override
-        public String getRemainingText(RadixNode currentNode, String key) {
-            return key.substring(currentNode.getKey().length());
-        }
-
-
-    }
-
-    /**
-     * Regular Expression Algorithm for the radix tree.
-     *
-     * It traverses the radix tree and matches expressions like ${YEAR} etc. with their allowable values e.g. 2014
-     */
-    public static class FeedRegexAlgorithm implements INodeAlgorithm {
-
-        /**
-         * This function matches a feed path template with feed instance's path string.
-         *
-         * Key is assumed to be a feed's path template and inputString is assumed to be an instance's path string.
-         * Variable/Regex parts of the feed's template are matched against the corresponding parts in inputString
-         * using regular expression and for other parts a character by character match is performed.
-         * e.g. Given templateString (/data/cas/${YEAR}/${MONTH}/${DAY}) and inputString (/data/cas/2014/09/09)
-         * the function will return true.
-         * @param templateString Node's key (Feed's template path)
-         * @param inputString inputString String to be matched against templateString(instance's path)
-         * @return true if the templateString and inputString match, false otherwise.
-         */
-        @Override
-        public boolean match(String templateString, String inputString) {
-            if (StringUtils.isBlank(templateString)) {
-                return false;
-            }
-            // Divide the templateString and inputString into templateParts of regex and character matches
-            List<String> templateParts = getPartsInPathTemplate(templateString);
-            List<String> inputStringParts = getCorrespondingParts(inputString, templateParts);
-
-            if (inputStringParts.size() != templateParts.size()) {
-                return false;
-            }
-
-            int counter = 0;
-            while (counter < inputStringParts.size()) {
-                if (!matchPart(templateParts.get(counter), inputStringParts.get(counter))) {
-                    return false;
-                }
-                counter++;
-            }
-            return true;
-        }
-
-
-        /**
-         *
-         * Finds if the current node's key is a prefix of the given inputString or not.
-         *
-         * @param inputTemplate inputTemplate String
-         * @param inputString inputString to be checked
-         * @return true if inputString starts with inputTemplate, false otherwise.
-         */
-        @Override
-        public boolean startsWith(String inputTemplate, String inputString) {
-
-            if (StringUtils.isBlank(inputString)) {
-                return false;
-            }
-            if (StringUtils.isBlank(inputTemplate)) {
-                return true;
-            }
-
-            // divide inputTemplate and inputString into corresponding templateParts of regex and character only strings
-            List<String> templateParts = getPartsInPathTemplate(inputTemplate);
-            List<String> remainingPattern = getCorrespondingParts(inputString, templateParts);
-
-            if (templateParts.size() > remainingPattern.size()) {
-                return false;
-            }
-
-            int counter = 0;
-            // compare part by part till the templateParts end
-            for (String templatePart : templateParts) {
-                String part = remainingPattern.get(counter);
-                if (!matchPart(templatePart, part)) {
-                    return false;
-                }
-                counter++;
-            }
-            return true;
-        }
-
-        @Override
-        public RadixNode getNextCandidate(RadixNode currentNode, String input) {
-            RadixNode newRoot = null;
-            // replace the regex with pattern's length
-            String remainingText = input.substring(getPatternsEffectiveLength(currentNode.getKey()));
-            List<RadixNode> result = currentNode.getChildren();
-            for(RadixNode child : result) {
-                String key = child.getKey();
-                if (key.startsWith("${")) {
-                    // get the regex
-                    String regex = key.substring(0, key.indexOf("}") + 1);
-                    // match the text and the regex
-                    FeedDataPath.VARS var = getMatchingRegex(regex);
-                    if (matchPart(regex, remainingText.substring(0, var.getValueSize()))) {
-                        newRoot = child; // if it matches then this is the newRoot
-                        break;
-                    }
-                } else if (child.getKey().charAt(0) == remainingText.charAt(0)) {
-                    newRoot = child;
-                    break;
-                }
-            }
-            return newRoot;
-        }
-
-        @Override
-        public String getRemainingText(RadixNode currentNode, String inputString) {
-            // find the match length for current inputString
-            return inputString.substring(getPatternsEffectiveLength(currentNode.getKey()));
-        }
-
-        private int getPatternsEffectiveLength(String templateString) {
-            if (StringUtils.isBlank(templateString)) {
-                return 0;
-            }
-
-            // Since we are only interested in the length, can replace pattern with a random string
-            for (FeedDataPath.VARS var : FeedDataPath.VARS.values()) {
-                templateString = templateString.replace("${" + var.name() + "}",
-                        RandomStringUtils.random(var.getValueSize()));
-            }
-
-            return templateString.length();
-        }
-
-        /**
-         * Divide a given template string into parts of regex and character strings
-         * e.g. /data/cas/${YEAR}/${MONTH}/${DAY} will be converted to
-         * [/data/cas/, ${YEAR}, /, ${MONTH}, /, ${DAY}]
-         * @param templateString input string representing a feed's path template
-         * @return list of parts in input templateString which are either completely regex or normal string.
-         */
-        private List<String> getPartsInPathTemplate(String templateString) {
-            //divide the node's templateString in parts of regular expression and normal string
-            List<String> parts = new ArrayList<String>();
-            Matcher matcher = FeedDataPath.PATTERN.matcher(templateString);
-            int currentIndex = 0;
-            while (matcher.find()) {
-                parts.add(templateString.substring(currentIndex, matcher.start()));
-                parts.add(matcher.group());
-                currentIndex = matcher.end();
-            }
-            if (currentIndex != templateString.length()) {
-                parts.add(templateString.substring(currentIndex));
-            }
-            return Collections.unmodifiableList(parts);
-        }
-
-
-        private FeedDataPath.VARS getMatchingRegex(String inputPart) {
-            //inputPart will be something like ${YEAR}
-            for (FeedDataPath.VARS var : FeedDataPath.VARS.values()) {
-                if (inputPart.equals("${" + var.name() + "}")) {
-                    return var;
-                }
-            }
-            return null;
-        }
-
-
-        /**
-         * Divides a string into corresponding parts for the template to carry out comparison.
-         * templateParts = [/data/cas/, ${YEAR}, /, ${MONTH}, /, ${DAY}]
-         * inputString = /data/cas/2014/09/09
-         * returns [/data/cas/, 2014, /, 09, /, 09]
-         * @param inputString normal string representing feed instance path
-         * @param templateParts parts of feed's path template broken into regex and non-regex units.
-         * @return a list of strings where each part of the list corresponds to a part in list of template parts.
-         */
-        private List<String> getCorrespondingParts(String inputString, List<String> templateParts) {
-            List<String> stringParts = new ArrayList<String>();
-            int counter = 0;
-            while (StringUtils.isNotBlank(inputString) && counter < templateParts.size()) {
-                String currentTemplatePart = templateParts.get(counter);
-                int length = Math.min(getPatternsEffectiveLength(currentTemplatePart), inputString.length());
-                stringParts.add(inputString.substring(0, length));
-                inputString = inputString.substring(length);
-                counter++;
-            }
-            if (StringUtils.isNotBlank(inputString)) {
-                stringParts.add(inputString);
-            }
-            return stringParts;
-        }
-
-        /**
-         * Compare a pure regex or pure string part with a given string.
-         *
-         * @param template template part, which can either be a pure regex or pure non-regex string.
-         * @param input input String to be matched against the template part.
-         * @return true if the input string matches the template, in case of a regex component a regex comparison is
-         * made, else a character by character comparison is made.
-         */
-        private boolean matchPart(String template, String input) {
-            if (template.startsWith("${")) { // if the part begins with ${ then it's a regex part, do regex match
-                template = template.replace("${", "\\$\\{");
-                template = template.replace("}", "\\}");
-                for (FeedDataPath.VARS var : FeedDataPath.VARS.values()) {//find which regex is this
-                    if (StringUtils.equals(var.regex(), template)) {// regex found, do matching
-                        //find part of the input string which should be matched against regex
-                        String desiredPart = input.substring(0, var.getValueSize());
-                        Pattern pattern = Pattern.compile(var.getValuePattern());
-                        Matcher matcher = pattern.matcher(desiredPart);
-                        if (!matcher.matches()) {
-                            return false;
-                        }
-                        return true;
-                    }
-                }
-                return false;
-            } else {// do exact match with normal strings
-                if (!input.startsWith(template)) {
-                    return false;
-                }
-            }
-            return true;
-        }
-    }
-}
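The FeedRegexAlgorithm above matches a feed path template such as /data/cas/${YEAR}/${MONTH}/${DAY} against an instance path such as /data/cas/2014/09/09. A compact sketch of the same idea with the date variables hard-coded; in the real class the widths and patterns come from FeedDataPath.VARS.

    import java.util.regex.Pattern;

    // Sketch: turn a feed path template into a regex and match an instance path against it.
    public final class FeedPathMatcher {

        private FeedPathMatcher() {
        }

        public static boolean matches(String template, String instancePath) {
            String regex = Pattern.quote(template)
                    .replace("${YEAR}", "\\E\\d{4}\\Q")
                    .replace("${MONTH}", "\\E\\d{2}\\Q")
                    .replace("${DAY}", "\\E\\d{2}\\Q");
            return Pattern.matches(regex, instancePath);
        }

        public static void main(String[] args) {
            String template = "/data/cas/${YEAR}/${MONTH}/${DAY}";
            System.out.println(matches(template, "/data/cas/2014/09/09")); // true
            System.out.println(matches(template, "/data/cas/2014/9/9"));   // false
        }
    }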

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/HadoopQueueUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/HadoopQueueUtil.java b/common/src/main/java/org/apache/falcon/util/HadoopQueueUtil.java
deleted file mode 100644
index cc48402..0000000
--- a/common/src/main/java/org/apache/falcon/util/HadoopQueueUtil.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.falcon.FalconException;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Utility class to get the Hadoop Queue names by querying resource manager.
- */
-public final class HadoopQueueUtil {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HadoopQueueUtil.class);
-
-    private HadoopQueueUtil() {
-        // make the constructor private
-    }
-
-    /**
-     * Uses Resource Manager REST API to get the hadoop scheduler info.
-     *
-     * @param rmBaseUrlStr base URL of the resource manager
-     * @return JSON string representing hadoop Scheduler Info
-     * @throws FalconException
-     */
-
-    public static String getHadoopClusterSchedulerInfo(String rmBaseUrlStr) throws FalconException {
-        KerberosAuthenticator kAUTHENTICATOR = new KerberosAuthenticator();
-        AuthenticatedURL.Token authenticationToken = new AuthenticatedURL.Token();
-        String rmSchedulerInfoURL = rmBaseUrlStr;
-        if (!rmSchedulerInfoURL.endsWith("/")) {
-            rmSchedulerInfoURL += "/";
-        }
-        rmSchedulerInfoURL += "ws/v1/cluster/scheduler";
-        HttpURLConnection conn = null;
-        BufferedReader reader = null;
-
-        try {
-            URL url = new URL(rmSchedulerInfoURL);
-            conn = new AuthenticatedURL(kAUTHENTICATOR).openConnection(url, authenticationToken);
-            reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
-            StringBuilder jsonResponse =  new StringBuilder();
-            String line;
-            while ((line = reader.readLine()) != null) {
-                jsonResponse.append(line);
-            }
-            return jsonResponse.toString();
-        } catch (Exception ex) {
-            throw new RuntimeException("Could not authenticate, " + ex.getMessage(), ex);
-        } finally {
-            IOUtils.closeQuietly(reader);
-            if (conn != null) {
-                conn.disconnect();
-            }
-        }
-
-    }
-
-    /**
-     * Extracts the root "queues" element from the scheduler-info JSON and collects every
-     * hadoop queue name it contains into the supplied set.
-     *
-     * @param jsonResult scheduler info JSON returned by the resource manager
-     * @param qNames output parameter that accumulates the queue names
-     * @return the same set, populated with all hadoop cluster queue names
-     * @throws JSONException
-     */
-
-    public static Set<String> getHadoopClusterQueueNamesHelper(String jsonResult, Set<String> qNames)
-        throws JSONException {
-        String qJson = extractRootQueuesElement(jsonResult);
-        LOG.debug("Extracted Queue JSON - {}", qJson);
-        JSONObject jObject = new JSONObject(qJson);
-        LOG.debug("Parsing Json result done");
-        JSONObject queues = jObject.getJSONObject("queues");
-        jsonParseForHadoopQueueNames(queues, qNames);
-        return qNames;
-    }
-
-    /**
-     * Recursively parses JSON hadoop cluster scheduler info and returns all the sub queue names in the output
-     * parameter.
-     *
-     * @param queues JSON document queues element
-     * @param qNames Output parameter that will have all hadoop cluster queue names
-     * @throws JSONException
-     *
-     */
-    public static void jsonParseForHadoopQueueNames(JSONObject queues, Set<String> qNames) throws JSONException {
-        JSONArray qs = queues.getJSONArray("queue");
-        for(int i=0; i<qs.length(); i++) {
-            JSONObject q = qs.getJSONObject(i);
-            qNames.add(q.getString("queueName"));
-
-            if ((q.isNull("type"))
-                    || (!q.getString("type").equalsIgnoreCase("capacitySchedulerLeafQueueInfo"))) {
-                jsonParseForHadoopQueueNames(q.getJSONObject("queues"), qNames);
-            }
-        }
-    }
-    /**
-     * Parse the hadoop cluster scheduler info to extract JSON element 'queues'.
-     *
-     * NOTE: the JSON returned by Resource Manager REST API is not well formed
-     * and trying to parse the entire returned document results in parse exception
-     * using latest JSON parsers.
-     *
-     * @param json scheduler info JSON returned by the resource manager
-     * @return the root "queues" element as a well-formed JSON string
-     */
-
-    public static String extractRootQueuesElement(String json) {
-        int start = json.indexOf("\"queues\":");
-        int i = start;
-        while(json.charAt(i) != '{') {
-            i++;
-        }
-        i++;
-        int count = 1;
-        while (count != 0) {
-            if (json.charAt(i) == '{') {
-                count++;
-            } else if (json.charAt(i) == '}') {
-                count--;
-            }
-            i++;
-        }
-        return "{" + json.substring(start, i) + "}";
-    }
-
-    /**
-     * Retrieves scheduler info JSON from the resource manager and extracts hadoop cluster queue names into
-     * a set of strings.
-     *
-     * @param rmBaseUrlStr base URL of the resource manager
-     * @return set of all hadoop cluster queue names
-     * @throws FalconException
-     */
-
-    public static Set<String> getHadoopClusterQueueNames(String rmBaseUrlStr) throws FalconException {
-        String jsonResult = getHadoopClusterSchedulerInfo(rmBaseUrlStr);
-        LOG.debug("Scheduler Info Result : {} ", jsonResult);
-        Set<String> qNames = new HashSet<>();
-        try {
-            return getHadoopClusterQueueNamesHelper(jsonResult, qNames);
-        } catch(JSONException jex) {
-            throw new FalconException(jex);
-        }
-    }
-}
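jsonParseForHadoopQueueNames() above walks the scheduler-info JSON recursively, collecting every queueName and descending into any queue that is not a capacity-scheduler leaf. A small sketch of what it produces for a hand-written fragment, assuming the class above is on the classpath; the queue names are made up.

    import java.util.HashSet;
    import java.util.Set;
    import org.json.JSONObject;

    // Sketch: feed a tiny, hand-written scheduler-info fragment through the recursive parser above.
    public final class QueueNamesDemo {

        private QueueNamesDemo() {
        }

        public static void main(String[] args) throws Exception {
            String fragment = "{\"queues\":{\"queue\":["
                    + "{\"queueName\":\"default\",\"type\":\"capacitySchedulerLeafQueueInfo\"},"
                    + "{\"queueName\":\"etl\",\"queues\":{\"queue\":["
                    + "{\"queueName\":\"etl-low\",\"type\":\"capacitySchedulerLeafQueueInfo\"}]}}]}}";
            Set<String> names = new HashSet<>();
            HadoopQueueUtil.jsonParseForHadoopQueueNames(
                    new JSONObject(fragment).getJSONObject("queues"), names);
            System.out.println(names); // [default, etl, etl-low] (order may vary)
        }
    }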

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/HdfsClassLoader.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/HdfsClassLoader.java b/common/src/main/java/org/apache/falcon/util/HdfsClassLoader.java
deleted file mode 100644
index bacc092..0000000
--- a/common/src/main/java/org/apache/falcon/util/HdfsClassLoader.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Helper class loader that fetches jars from HDFS location and loads into JVM.
- */
-
-public class HdfsClassLoader extends URLClassLoader {
-
-    private static final Logger LOG = LoggerFactory.getLogger(HdfsClassLoader.class);
-    private static Map<String, HdfsClassLoader>  classLoaderCache = new ConcurrentHashMap<String, HdfsClassLoader>();
-    private static final Object LOCK = new Object();
-
-    public static ClassLoader load(final String name, final List<String> jarHdfsPath) throws IOException {
-        LOG.info("ClassLoader cache size = " + classLoaderCache.size());
-        if (classLoaderCache.containsKey(name)) {
-            return classLoaderCache.get(name);
-        }
-
-        synchronized (LOCK) {
-            final URL[] urls = copyHdfsJarFilesToTempDir(name, jarHdfsPath);
-            LOG.info("Copied jar files from HDFS to local dir");
-            final ClassLoader parentClassLoader = HdfsClassLoader.class.getClassLoader();
-            HdfsClassLoader hdfsClassLoader = java.security.AccessController.doPrivileged(
-                    new java.security.PrivilegedAction<HdfsClassLoader>() {
-                        @Override
-                        public HdfsClassLoader run() {
-                            return new HdfsClassLoader(name, urls, parentClassLoader);
-                        }
-                    }
-            );
-            LOG.info("Created a new HdfsClassLoader for name = {} with parent = {} using classpath = {}",
-                    name, parentClassLoader.toString(),  Arrays.toString(jarHdfsPath.toArray()));
-            classLoaderCache.put(name, hdfsClassLoader);
-            return hdfsClassLoader;
-        }
-    }
-
-    private final ClassLoader realParent;
-
-    public HdfsClassLoader(String name, URL[] urls, ClassLoader parentClassLoader) {
-        // Pass null as the parent so that delegation is handled explicitly through realParent below.
-        super(urls, null);
-        this.realParent = parentClassLoader;
-    }
-
-    @Override
-    protected Class<?> loadClass(String name, boolean resolve)
-        throws ClassNotFoundException {
-
-        // Load through the parent class loader first and then fallback to this class loader.
-        try {
-            return realParent.loadClass(name);
-        } catch (Throwable t) {
-            return super.loadClass(name, resolve);
-        }
-    }
-
-    @Override
-    public URL getResource(String name) {
-        // This is the same as the jdk's getResource except the parent
-        // is taken from the realParent member instead of the parent member.
-        URL url = realParent.getResource(name);
-        if (url == null) {
-            url = findResource(name);
-        }
-        return url;
-    }
-
-    private static URL[] copyHdfsJarFilesToTempDir(String databaseName, List<String> jars) throws IOException {
-        List<URL> urls = new ArrayList<URL>();
-
-        final Configuration conf = new Configuration();
-        Path localPath = createTempDir(databaseName, conf);
-
-        for (String jar : jars) {
-            Path jarPath = new Path(jar);
-            final FileSystem fs = jarPath.getFileSystem(conf);
-            if (fs.isFile(jarPath) && jarPath.getName().endsWith(".jar")) {
-                LOG.info("Copying jarFile = " + jarPath);
-                fs.copyToLocalFile(jarPath, localPath);
-            }
-        }
-        urls.addAll(getJarsInPath(localPath.toUri().toURL()));
-
-        return urls.toArray(new URL[urls.size()]);
-    }
-
-    private static Path createTempDir(String databaseName, Configuration conf) throws IOException {
-        String tmpDir = System.getProperty("java.io.tmpdir");
-        String tmpBaseDir = StringUtils.isBlank(tmpDir)
-                ? "file:///tmp" : String.format("file://%s", tmpDir);
-        Path localPath = new Path(tmpBaseDir, databaseName);
-        localPath.getFileSystem(conf).mkdirs(localPath);
-        return localPath;
-    }
-
-    private static List<URL> getJarsInPath(URL fileURL) throws MalformedURLException {
-        List<URL> urls = new ArrayList<URL>();
-
-        File file = new File(fileURL.getPath());
-        if (file.isDirectory()) {
-            File[] jarFiles = file.listFiles(new FileFilter() {
-                @Override
-                public boolean accept(File file) {
-                    return file.isFile() && file.getName().endsWith(".jar");
-                }
-            });
-
-            if (jarFiles != null) {
-                for (File jarFile : jarFiles) {
-                    urls.add(jarFile.toURI().toURL());
-                }
-            }
-
-            if (!fileURL.toString().endsWith("/")) {
-                fileURL = new URL(fileURL.toString() + "/");
-            }
-        }
-
-        urls.add(fileURL);
-        return urls;
-    }
-}
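
A minimal usage sketch (not part of the sources above) for this class loader: the jar path and class
name are hypothetical, and a reachable HDFS namenode plus a readable .jar are assumed. Jars are copied
to a local temp directory named after the loader, the loader is cached under that name, and classes
resolve parent-first before falling back to the HDFS jars.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.falcon.util.HdfsClassLoader;

public class HdfsClassLoaderExample {
    public static void main(String[] args) throws IOException, ClassNotFoundException {
        // Hypothetical HDFS jar location; any readable .jar path works.
        List<String> jars = Arrays.asList("hdfs://namenode:8020/apps/falcon/libext/custom-plugin.jar");

        // Copies the jar to a local temp dir and caches the loader under the name "custom-plugin".
        ClassLoader loader = HdfsClassLoader.load("custom-plugin", jars);

        // Hypothetical class contained in the jar; the parent class loader is consulted first.
        Class<?> clazz = loader.loadClass("com.example.CustomPlugin");
        System.out.println("Loaded " + clazz.getName());
    }
}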

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/RadixNode.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/RadixNode.java b/common/src/main/java/org/apache/falcon/util/RadixNode.java
deleted file mode 100644
index 35d7ef3..0000000
--- a/common/src/main/java/org/apache/falcon/util/RadixNode.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Represents a node in Radix Tree.
- *
- * Each node contains a part of the key, links to its children and a collection of values
- * stored against the key (if the node marks the end of a key).
- *
- */
-public class RadixNode<T> {
-
-    private String key;
-
-    private List<RadixNode<T>> children;
-
-    private boolean isTerminal;
-
-    private Set<T> values;
-
-    public RadixNode(){
-        key = "";
-        children = new LinkedList<RadixNode<T>>();
-        isTerminal = false;
-        values = new HashSet<T>();
-    }
-
-    public String getKey() {
-        return key;
-    }
-
-    public void setKey(String key) {
-        this.key = key;
-    }
-
-    public List<RadixNode<T>> getChildren() {
-        return children;
-    }
-
-    public void setChildren(List<RadixNode<T>> children) {
-        this.children = children;
-    }
-
-    public boolean isTerminal() {
-        return isTerminal;
-    }
-
-    public void setTerminal(boolean isTerminalNew) {
-        this.isTerminal = isTerminalNew;
-    }
-
-    /**
-     * The root node is the node whose key is the empty string.
-     *
-     * @return true if the node is the root node, false otherwise
-     */
-    public boolean isRoot(){
-        return StringUtils.equals(key, "");
-    }
-
-    public Collection<T> getValues() {
-        return Collections.unmodifiableCollection(values);
-    }
-
-    public void setValues(Collection<T> newValues) {
-        values = new HashSet<T>();
-        values.addAll(newValues);
-    }
-
-    public void addValue(T value){
-        values.add(value);
-    }
-
-    public void removeValue(T value) {
-        values.remove(value);
-    }
-    public void removeAll() {
-        values.clear();
-    }
-
-    public boolean containsValue(T value){
-        return values.contains(value);
-    }
-
-    public int getMatchLength(String input){
-        int matchLength = 0;
-
-        if (input == null){
-            return 0;
-        }
-
-        while(matchLength < key.length()
-                && matchLength < input.length()
-                && input.charAt(matchLength) == key.charAt(matchLength)){
-            matchLength += 1;
-        }
-
-        return matchLength;
-    }
-
-
-    /**
-     * Checks whether the node's key matches the given input.
-     *
-     * It can do either a character-by-character match or a regular expression match (used to match a feed instance
-     * path with a feed location template). Only regular expressions allowed in the feed path are evaluated for matching.
-     * @param input input string to be matched with the key of the node.
-     * @param matcher A custom matcher algorithm to match the node's key against the input. It is used when matching
-     *                the path of a Feed's instance to the Feed's path template.
-     * @return true if the node's key matches the input, false otherwise.
-     */
-    public boolean matches(String input, FalconRadixUtils.INodeAlgorithm matcher) {
-        if (input == null) {
-            return false;
-        }
-
-        if (matcher == null) {
-            return StringUtils.equals(getKey(), input);
-        }
-
-        return matcher.match(this.getKey(), input);
-    }
-}
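
A small sketch, not from the original sources, showing how a node's key participates in prefix
matching via getMatchLength() and how matches() behaves without a custom matcher:

import org.apache.falcon.util.RadixNode;

public class RadixNodeExample {
    public static void main(String[] args) {
        RadixNode<String> node = new RadixNode<String>();
        node.setKey("key1");
        node.setTerminal(true);
        node.addValue("value1");

        // "key1" is fully contained in "key123", so all 4 characters match.
        System.out.println(node.getMatchLength("key123"));   // 4
        // Only "key" is shared with "keyboard".
        System.out.println(node.getMatchLength("keyboard")); // 3
        // With no matcher supplied, matches() falls back to exact string equality.
        System.out.println(node.matches("key1", null));      // true
    }
}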

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/RadixTree.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/RadixTree.java b/common/src/main/java/org/apache/falcon/util/RadixTree.java
deleted file mode 100644
index a667506..0000000
--- a/common/src/main/java/org/apache/falcon/util/RadixTree.java
+++ /dev/null
@@ -1,432 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.entity.store.FeedPathStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Formattable;
-import java.util.Formatter;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-
-
-/**
- * A thread-safe Radix Tree implementation of the FeedPathStore.
- *
- *
- * A radix tree (also patricia trie or radix trie or compact prefix tree) is a space-optimized
- * trie data structure where each node with only one child is merged with its parent.
- *
- * For example the tree representation for the following (key,value) pairs -
- * [("key1", "value1"), ("key123", "Key was key123"), ("key124", "Key was key124"),
- * ("key2", "value2"), ("random", "random")] will be as below.
- *
- * |
- *    |-key
- *    |--1[[value1]]*
- *    |---2
- *    |----3[[Key was key123]]*
- *    |----4[[Key was key124]]*
- *    |--2[[value2]]*
- *    |-random[[random]]*
- *
- * For more details on Radix Tree please refer
- * <a href="http://en.wikipedia.org/wiki/Radix_tree">Radix Tree</a>
- * @param <T> Type of value being stored against the key.
- */
-public class RadixTree<T> implements FeedPathStore<T>, Formattable {
-    private static final Logger LOG = LoggerFactory.getLogger(RadixTree.class);
-
-    protected RadixNode<T> root;
-
-    private int size;
-
-    public RadixTree(){
-        root = new RadixNode<T>();
-        root.setKey("");
-        size = 0;
-    }
-
-    /**
-     * Return the number of keys stored in the tree.
-     *
-     * Since all keys end in terminal nodes and duplicate keys are not allowed,
-     * size is equal to the number of terminal nodes in the tree.
-     * @return number of keys in the tree.
-     */
-    @Override
-    public synchronized int getSize() {
-        return size;
-    }
-
-    /**
-     * Insert a <key, value> pair in the Radix Tree.
-     *
-     * @param key Key to be stored
-     * @param value Value to be stored against that key
-     */
-    @Override
-    public synchronized void insert(@Nullable String key, @Nonnull T value){
-        if (key != null && !key.trim().isEmpty()){
-            LOG.debug("Insert called for key: {} and value: {}", key.trim(), value);
-            insertKeyRecursive(key.trim(), value, root);
-        }
-    }
-
-    private void insertKeyRecursive(String remainingText, T value, RadixNode<T> currentNode){
-
-        int currentMatchLength = currentNode.getMatchLength(remainingText);
-        String newRemainingText = remainingText.substring(currentMatchLength, remainingText.length());
-
-        // if root or current node key is subset of the input key GO DOWN
-        if (currentNode.isRoot()
-                || (currentMatchLength == currentNode.getKey().length()
-                && currentMatchLength < remainingText.length())){
-
-            // if a path to go down exists then go down that path
-            boolean foundPath = false;
-            for(RadixNode<T> child: currentNode.getChildren()){
-                if (child.getKey().charAt(0) == newRemainingText.charAt(0)){
-                    insertKeyRecursive(newRemainingText, value, child);
-                    foundPath = true;
-                    break;
-                }
-            }
-            // else create a new node.
-            if (!foundPath){
-                RadixNode<T> node = new RadixNode<T>();
-                node.setKey(newRemainingText);
-                node.addValue(value);
-                node.setTerminal(true);
-                currentNode.getChildren().add(node);
-                size += 1;
-            }
-        }else if (currentMatchLength == remainingText.length() && currentMatchLength < currentNode.getKey().length()){
-            // if remainingText is subset of the current node key
-            RadixNode<T> node = new RadixNode<T>();
-            node.setChildren(currentNode.getChildren());
-            node.setKey(currentNode.getKey().substring(currentMatchLength));
-            node.setValues(currentNode.getValues());
-            node.setTerminal(currentNode.isTerminal());
-
-            currentNode.setChildren(new LinkedList<RadixNode<T>>());
-            currentNode.getChildren().add(node);
-            currentNode.setTerminal(true);
-            currentNode.setKey(currentNode.getKey().substring(0, currentMatchLength));
-            currentNode.removeAll();
-            currentNode.addValue(value);
-
-            size += 1;
-
-        }else if (currentMatchLength < remainingText.length() && currentMatchLength < currentNode.getKey().length()){
-
-            //add new Node and move all current node's children and value to it
-            RadixNode<T> node = new RadixNode<T>();
-            node.setChildren(currentNode.getChildren());
-            node.setTerminal(currentNode.isTerminal());
-            node.setValues(currentNode.getValues());
-            node.setKey(currentNode.getKey().substring(currentMatchLength, currentNode.getKey().length()));
-
-            // add node for the text
-            RadixNode<T> node2 = new RadixNode<T>();
-            node2.setKey(newRemainingText);
-            node2.setTerminal(true);
-            node2.addValue(value);
-
-            //update current node to be new root
-            currentNode.setTerminal(false);
-            currentNode.setKey(currentNode.getKey().substring(0, currentMatchLength));
-            currentNode.setChildren(new LinkedList<RadixNode<T>>());
-            currentNode.getChildren().add(node);
-            currentNode.getChildren().add(node2);
-
-            size += 1;
-        }else if (currentMatchLength == remainingText.length() && currentMatchLength == currentNode.getKey().length()){
-            // if current node key and input key both match equally
-            if (currentNode.isTerminal()){
-                currentNode.addValue(value);
-            }else {
-                currentNode.setTerminal(true);
-                currentNode.addValue(value);
-            }
-            size += 1;
-        }
-    }
-
-    /**
-     * Find the value for the given key if it exists in the tree, null otherwise.
-     *
-     * A key is said to exist in the tree if we can generate exactly that string
-     * by going down from root to a terminal node. If a key exists we return the value
-     * stored at the terminal node.
-     *
-     * @param key - input key to be searched.
-     * @return Collection of values stored against the key if it exists, null otherwise
-     */
-    @Override
-    @Nullable
-    public synchronized Collection<T> find(@Nonnull String key, FalconRadixUtils.INodeAlgorithm algorithm) {
-        if (key != null && !key.trim().isEmpty()) {
-            if (algorithm == null) {
-                algorithm = new FalconRadixUtils.StringAlgorithm();
-            }
-            return recursiveFind(key.trim(), root, algorithm);
-        }
-        return null;
-    }
-
-    @Nullable
-    @Override
-    public Collection<T> find(@Nonnull String key) {
-        if (key != null && !key.trim().isEmpty()) {
-            FalconRadixUtils.INodeAlgorithm algorithm = new FalconRadixUtils.StringAlgorithm();
-            return recursiveFind(key.trim(), root, algorithm);
-        }
-        return null;
-    }
-
-    private Collection<T> recursiveFind(String key, RadixNode<T> currentNode,
-        FalconRadixUtils.INodeAlgorithm algorithm){
-
-        if (!algorithm.startsWith(currentNode.getKey(), key)){
-            LOG.debug("Current Node key: {} is not a prefix in the input key: {}", currentNode.getKey(), key);
-            return null;
-        }
-
-        if (algorithm.match(currentNode.getKey(), key)){
-            if (currentNode.isTerminal()){
-                LOG.debug("Found the terminal node with key: {} for the given input.", currentNode.getKey());
-                return currentNode.getValues();
-            }else {
-                LOG.debug("currentNode is not terminal. Current node's key is {}", currentNode.getKey());
-                return null;
-            }
-        }
-
-        //find child to follow, using remaining Text
-        RadixNode<T> newRoot = algorithm.getNextCandidate(currentNode, key);
-        String remainingText = algorithm.getRemainingText(currentNode, key);
-
-        if (newRoot == null){
-            LOG.debug("No child found to follow for further processing. Current node key {}");
-            return null;
-        }else {
-            LOG.debug("Recursing with new key: {} and new remainingText: {}", newRoot.getKey(), remainingText);
-            return recursiveFind(remainingText, newRoot, algorithm);
-        }
-    }
-
-    /**
-     *  Deletes a given key,value pair from the Radix Tree.
-     *
-     * @param key key to be deleted
-     * @param value value to be deleted
-     */
-    @Override
-    public synchronized boolean delete(@Nonnull String key, @Nonnull T value) {
-        if (key != null && !key.trim().isEmpty()){
-            LOG.debug("Delete called for key:{}", key.trim());
-            return recursiveDelete(key, null, root, value);
-        }
-        return false;
-    }
-
-    private boolean recursiveDelete(String key, RadixNode<T> parent, RadixNode<T> currentNode, T value){
-        LOG.debug("Recursing with key: {}, currentNode: {}", key, currentNode.getKey());
-        if (!key.startsWith(currentNode.getKey())){
-            LOG.debug("Current node's key: {} is not a prefix of the remaining input key: {}",
-                    currentNode.getKey(), key);
-            return false;
-        }
-
-        if (StringUtils.equals(key, currentNode.getKey())){
-            LOG.trace("Current node's key:{} and the input key:{} matched", currentNode.getKey(), key);
-            if (currentNode.getValues().contains(value)){
-                LOG.debug("Given value is found in the collection of values against the given key");
-                currentNode.removeValue(value);
-                size -= 1;
-                if (currentNode.getValues().size() == 0){
-                    LOG.debug("Exact match between current node's key: {} and remaining input key: {}",
-                        currentNode.getKey(), key);
-                    if (currentNode.isTerminal()){
-                        //if child has no children & only one value, then delete and compact parent if needed
-                        if (currentNode.getChildren().size() == 0){
-                            Iterator<RadixNode<T>> it = parent.getChildren().iterator();
-                            while(it.hasNext()){
-                                if (StringUtils.equals(it.next().getKey(), currentNode.getKey())){
-                                    it.remove();
-                                    LOG.debug("Deleting the node");
-                                    break;
-                                }
-                            }
-                        }else if (currentNode.getChildren().size() > 1){
-                            // if child has more than one children just mark non terminal
-                            currentNode.setTerminal(false);
-                        }else if (currentNode.getChildren().size() == 1){
-                            // if child has only one child then compact node
-                            LOG.debug("compacting node with child as node to be deleted has only 1 child");
-                            RadixNode<T> child = currentNode.getChildren().get(0);
-                            currentNode.setChildren(child.getChildren());
-                            currentNode.setTerminal(child.isTerminal());
-                            currentNode.setKey(currentNode.getKey() + child.getKey());
-                            currentNode.setValues(child.getValues());
-                        }
-
-                        //parent can't be null as root will never match with input key as it is not a terminal node.
-                        if (!parent.isTerminal() && !parent.isRoot()){
-                            // if only one child left in parent and parent is not root then join parent
-                            // and the only child key
-                            if (parent.getChildren().size() == 1){
-                                RadixNode<T> onlyChild = parent.getChildren().get(0);
-                                String onlyChildKey = onlyChild.getKey();
-                                LOG.debug("Compacting child: {} and parent: {}", onlyChildKey, parent.getKey());
-                                parent.setKey(parent.getKey() + onlyChildKey);
-                                parent.setChildren(onlyChild.getChildren());
-                                parent.setTerminal(onlyChild.isTerminal());
-                                parent.setValues(onlyChild.getValues());
-                            }
-                        }
-                        return true;
-                    }else{
-                        LOG.debug("Key found only as a prefix and not at a terminal node");
-                        return false;
-                    }
-                }
-                return true;
-            }else {
-                LOG.debug("Current value is not found in the collection of values against the given key, no-op");
-                return false;
-            }
-        }
-
-        LOG.debug("Current node's key: {} is a prefix of the input key: {}", currentNode.getKey(), key);
-        //find child to follow
-        RadixNode<T> newRoot = null;
-        String remainingKey = key.substring(currentNode.getMatchLength(key));
-        for(RadixNode<T> el : currentNode.getChildren()){
-            LOG.trace("Finding next child to follow. Current child's key:{}", el.getKey());
-            if (el.getKey().charAt(0) == remainingKey.charAt(0)){
-                newRoot = el;
-                break;
-            }
-        }
-
-        if (newRoot == null){
-            LOG.debug("No child was found with common prefix with the remainder key: {}", key);
-            return false;
-        }else {
-            LOG.debug("Found a child's key: {} with common prefix, recursing on it", newRoot.getKey());
-            return recursiveDelete(remainingKey, currentNode, newRoot, value);
-        }
-    }
-
-
-    /**
-     * Useful for debugging.
-     */
-    @Override
-    public void formatTo(Formatter formatter, int flags, int width, int precision) {
-        formatNodeTo(formatter, 0, root);
-
-    }
-
-    private void formatNodeTo(Formatter formatter, int level, RadixNode<T> node){
-        for (int i = 0; i < level; i++) {
-            formatter.format(" ");
-        }
-        formatter.format("|");
-        for (int i = 0; i < level; i++) {
-            formatter.format("-");
-        }
-
-        if (node.isTerminal()){
-            formatter.format("%s[%s]*%n", node.getKey(),  node.getValues());
-        }else{
-            formatter.format("%s%n", node.getKey());
-        }
-
-        for (RadixNode<T> child : node.getChildren()) {
-            formatNodeTo(formatter, level + 1, child);
-        }
-    }
-
-    /**
-     * Find List of substring of keys which have given input as a prefix.
-     *
-     * @param key - Input string for which all Suffix Children should be returned
-     * @param limit - Maximum Number of results. If limit is less than 0 then all nodes are returned.
-     *              If limit is 0 then returns null.
-     */
-    @javax.annotation.Nullable
-    public List<String> findSuffixChildren(String key, int limit){
-        if (key == null || limit == 0){
-            return null;
-        }
-        RadixNode<T> currentNode = root;
-        String remainingText = key.trim();
-        List<String> result = new LinkedList<String>();
-        do{
-            boolean flag = false;
-            // find the child with common prefix
-            for(RadixNode<T> child: currentNode.getChildren()){
-                LOG.debug("Checking for child key: {} against remainingText: {}", child.getKey(), remainingText);
-                if (child.getKey().charAt(0) == remainingText.charAt(0)){
-                    LOG.debug("Child key: {} found to have overlap with the remainingText: {}", child.getKey(),
-                            remainingText);
-                    flag = true;
-
-                    //if entire key doesn't match return null
-                    if (!remainingText.startsWith(child.getKey())){
-                        return null;
-                    }
-
-                    // if entire key equals remainingText - return its children up to the specified limit
-                    if (StringUtils.equals(child.getKey(), remainingText)){
-                        int counter = 0;
-
-                        for(RadixNode<T> suffixChild: child.getChildren()){
-                            if (limit < 0 || counter < limit){
-                                result.add(suffixChild.getKey());
-                            }
-                            counter += 1;
-                        }
-                        return Collections.unmodifiableList(result);
-                    }
-
-                    //if entire key matches but it is not equal to entire remainingText - repeat
-                    remainingText = remainingText.substring(child.getKey().length());
-                    currentNode = child;
-                    break;
-
-                }
-            }
-            // if no child found with common prefix return null;
-            if (!flag){
-                return null;
-            }
-        }while (true);
-    }
-}
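
A usage sketch (not part of the original sources) that exercises insert, find and delete with the
same keys as the javadoc example above:

import java.util.Collection;

import org.apache.falcon.util.RadixTree;

public class RadixTreeExample {
    public static void main(String[] args) {
        RadixTree<String> tree = new RadixTree<String>();
        tree.insert("key1", "value1");
        tree.insert("key123", "Key was key123");
        tree.insert("key124", "Key was key124");
        tree.insert("key2", "value2");
        tree.insert("random", "random");

        Collection<String> values = tree.find("key123");
        System.out.println(values);                         // [Key was key123]

        System.out.println(tree.delete("key2", "value2"));  // true
        System.out.println(tree.getSize());                 // 4

        // RadixTree implements Formattable, so %s prints the compacted tree, as in the javadoc.
        System.out.printf("%s", tree);
    }
}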

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/ReflectionUtils.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/ReflectionUtils.java b/common/src/main/java/org/apache/falcon/util/ReflectionUtils.java
deleted file mode 100644
index 80022e0..0000000
--- a/common/src/main/java/org/apache/falcon/util/ReflectionUtils.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-
-/**
- * Helper methods for class instantiation through reflection.
- */
-public final class ReflectionUtils {
-
-    private ReflectionUtils() {}
-
-    public static <T> T getInstance(String classKey) throws FalconException {
-        return ReflectionUtils.<T>getInstanceByClassName(StartupProperties.get().getProperty(classKey));
-    }
-
-    public static <T> T getInstance(String classKey, Class<?> argCls, Object arg) throws FalconException {
-        return ReflectionUtils.<T>getInstanceByClassName(StartupProperties.get().getProperty(classKey), argCls, arg);
-    }
-
-    @SuppressWarnings("unchecked")
-    public static <T> T getInstanceByClassName(String clazzName) throws FalconException {
-        try {
-            Class<T> clazz = (Class<T>) ReflectionUtils.class.getClassLoader().loadClass(clazzName);
-            try {
-                return clazz.newInstance();
-            } catch (IllegalAccessException e) {
-                Method method = clazz.getMethod("get");
-                return (T) method.invoke(null);
-            }
-        } catch (Exception e) {
-            throw new FalconException("Unable to get instance for " + clazzName, e);
-        }
-    }
-
-    /**
-     * Invokes constructor with one argument.
-     * @param clazzName - classname
-     * @param argCls - Class of the argument
-     * @param arg - constructor argument
-     * @param <T> - instance type
-     * @return Class instance
-     * @throws FalconException
-     */
-    @SuppressWarnings("unchecked")
-    public static <T> T getInstanceByClassName(String clazzName, Class<?> argCls, Object arg) throws
-        FalconException {
-        try {
-            Class<T> clazz = (Class<T>) ReflectionUtils.class.getClassLoader().loadClass(clazzName);
-            Constructor<T> constructor = clazz.getConstructor(argCls);
-            return constructor.newInstance(arg);
-        } catch (Exception e) {
-            throw new FalconException("Unable to get instance for " + clazzName, e);
-        }
-    }
-}
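
A minimal sketch of instantiating a class by name with the helper above; java.util.ArrayList stands
in for a real Falcon implementation class and is used purely for illustration:

import java.util.List;

import org.apache.falcon.FalconException;
import org.apache.falcon.util.ReflectionUtils;

public class ReflectionUtilsExample {
    public static void main(String[] args) throws FalconException {
        // Instantiated via the no-arg constructor; a static get() factory would be tried
        // if the constructor were not accessible.
        List<String> list = ReflectionUtils.getInstanceByClassName("java.util.ArrayList");
        list.add("falcon");
        System.out.println(list);
    }
}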

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java b/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java
deleted file mode 100644
index a8b99bb..0000000
--- a/common/src/main/java/org/apache/falcon/util/ReplicationDistCpOption.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-/**
- * enum for DistCp options.
- */
-public enum ReplicationDistCpOption {
-
-    DISTCP_OPTION_OVERWRITE("overwrite"),
-    DISTCP_OPTION_IGNORE_ERRORS("ignoreErrors"),
-    DISTCP_OPTION_SKIP_CHECKSUM("skipChecksum"),
-    DISTCP_OPTION_REMOVE_DELETED_FILES("removeDeletedFiles"),
-    DISTCP_OPTION_PRESERVE_BLOCK_SIZE("preserveBlockSize"),
-    DISTCP_OPTION_PRESERVE_REPLICATION_NUMBER("preserveReplicationNumber"),
-    DISTCP_OPTION_PRESERVE_PERMISSIONS("preservePermission");
-
-    private final String name;
-
-    ReplicationDistCpOption(String name) {
-        this.name = name;
-    }
-
-    public String getName() {
-        return name;
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/RuntimeProperties.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/RuntimeProperties.java b/common/src/main/java/org/apache/falcon/util/RuntimeProperties.java
deleted file mode 100644
index 714a64d..0000000
--- a/common/src/main/java/org/apache/falcon/util/RuntimeProperties.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Dynamic properties that may be modified while the server is running.
- */
-public final class RuntimeProperties extends ApplicationProperties {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RuntimeProperties.class);
-
-    private static final String PROPERTY_FILE = "runtime.properties";
-
-    private static final AtomicReference<RuntimeProperties> INSTANCE =
-            new AtomicReference<RuntimeProperties>();
-
-    private RuntimeProperties() throws FalconException {
-        super();
-    }
-
-    @Override
-    protected String getPropertyFile() {
-        return PROPERTY_FILE;
-    }
-
-    public static Properties get() {
-        try {
-            if (INSTANCE.get() == null) {
-                RuntimeProperties properties = new RuntimeProperties();
-                properties.loadProperties();
-                properties.validateProperties();
-                INSTANCE.compareAndSet(null, properties);
-                if (INSTANCE.get() == properties) {
-                    Thread refreshThread = new Thread(new DynamicLoader());
-                    refreshThread.start();
-                }
-            }
-            return INSTANCE.get();
-        } catch (FalconException e) {
-            throw new RuntimeException("Unable to read application " + "runtime properties", e);
-        }
-    }
-
-    protected void validateProperties() throws FalconException {
-        String colosProp = getProperty("all.colos");
-        if (colosProp == null || colosProp.isEmpty()) {
-            return;
-        }
-        String[] colos = colosProp.split(",");
-        for (int i = 0; i < colos.length; i++) {
-            colos[i] = colos[i].trim();
-            String falconEndpoint = getProperty("falcon." + colos[i] + ".endpoint");
-            if (falconEndpoint == null || falconEndpoint.isEmpty()) {
-                throw new FalconException("No falcon server endpoint mentioned in Prism runtime for colo, "
-                        + colos[i] + ".");
-            }
-        }
-    }
-
-    /**
-     * Thread for loading properties periodically.
-     */
-    private static  final class DynamicLoader implements Runnable {
-
-        private static final long REFRESH_DELAY = 300000L;
-        private static final int MAX_ITER = 20;  // caps the back-off at 20 refresh intervals
-
-        @Override
-        public void run() {
-            long backOffDelay = REFRESH_DELAY;
-            while (true) {
-                try {
-                    Thread.sleep(Math.min(MAX_ITER * REFRESH_DELAY, backOffDelay));
-                    try {
-                        RuntimeProperties newProperties = new RuntimeProperties();
-                        newProperties.loadProperties();
-                        newProperties.validateProperties();
-                        INSTANCE.set(newProperties);
-                        backOffDelay = REFRESH_DELAY;
-                    } catch (FalconException e) {
-                        LOG.warn("Error refreshing runtime properties", e);
-                        backOffDelay += REFRESH_DELAY;
-                    }
-                } catch (InterruptedException e) {
-                    LOG.error("Application is stopping. Aborting...");
-                    break;
-                }
-            }
-        }
-    }
-}
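
A small sketch of reading a dynamic property, assuming a runtime.properties is available on the
classpath; the first call to get() also starts the background refresh thread shown above. The
property name is the one checked in validateProperties().

import java.util.Properties;

import org.apache.falcon.util.RuntimeProperties;

public class RuntimePropertiesExample {
    public static void main(String[] args) {
        Properties props = RuntimeProperties.get();
        // "all.colos" is optional; validateProperties() only checks colo endpoints when it is set.
        String colos = props.getProperty("all.colos", "");
        System.out.println("Configured colos: " + colos);
    }
}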

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/StartupProperties.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/StartupProperties.java b/common/src/main/java/org/apache/falcon/util/StartupProperties.java
deleted file mode 100644
index 7522b0d..0000000
--- a/common/src/main/java/org/apache/falcon/util/StartupProperties.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.falcon.FalconException;
-
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Properties read during application startup.
- */
-public final class StartupProperties extends ApplicationProperties {
-
-    private static final String PROPERTY_FILE = "startup.properties";
-
-    private static final AtomicReference<StartupProperties> INSTANCE =
-            new AtomicReference<StartupProperties>();
-
-    private StartupProperties() throws FalconException {
-        super();
-    }
-
-    @Override
-    protected String getPropertyFile() {
-        return PROPERTY_FILE;
-    }
-
-    public static Properties get() {
-        try {
-            if (INSTANCE.get() == null) {
-                INSTANCE.compareAndSet(null, new StartupProperties());
-            }
-            return INSTANCE.get();
-        } catch (FalconException e) {
-            throw new RuntimeException("Unable to read application " + "startup properties", e);
-        }
-    }
-}
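
A sketch, with a hypothetical property key, of reading a startup property, assuming a
startup.properties is available on the classpath; keys of this shape are what
ReflectionUtils.getInstance(classKey) resolves into class names.

import java.util.Properties;

import org.apache.falcon.util.StartupProperties;

public class StartupPropertiesExample {
    public static void main(String[] args) {
        Properties props = StartupProperties.get();
        // Hypothetical key; real keys live in the deployed startup.properties.
        String implClass = props.getProperty("example.service.impl", "<not configured>");
        System.out.println("example.service.impl = " + implClass);
    }
}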

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/util/StateStoreProperties.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/StateStoreProperties.java b/common/src/main/java/org/apache/falcon/util/StateStoreProperties.java
deleted file mode 100644
index a3e6a56..0000000
--- a/common/src/main/java/org/apache/falcon/util/StateStoreProperties.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.util;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.expression.ExpressionHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Properties for State Store during application startup.
- */
-public final class StateStoreProperties extends ApplicationProperties {
-
-    private static final Logger LOG = LoggerFactory.getLogger(StateStoreProperties.class);
-
-    private static final String PROPERTY_FILE = "statestore.properties";
-    private static final String CREDENTIALS_FILE= "falcon.statestore.credentials.file";
-    private static final String DEFAULT_CREDENTIALS_FILE = "statestore.credentials";
-
-    private static final AtomicReference<StateStoreProperties> INSTANCE =
-            new AtomicReference<>();
-
-
-    protected StateStoreProperties() throws FalconException {
-        super();
-    }
-
-    @Override
-    protected String getPropertyFile() {
-        return PROPERTY_FILE;
-    }
-
-    @Override
-    protected void loadProperties() throws FalconException {
-        super.loadProperties();
-
-        String credentialsFile = (String)get(CREDENTIALS_FILE);
-        try {
-            InputStream resourceAsStream = null;
-            if (StringUtils.isNotBlank(credentialsFile)) {
-                resourceAsStream = getResourceAsStream(new File(credentialsFile));
-            }
-            // fall back to class path.
-            if (resourceAsStream == null) {
-                resourceAsStream = checkClassPath(DEFAULT_CREDENTIALS_FILE);
-            }
-            if (resourceAsStream != null) {
-                try {
-                    loadCredentials(resourceAsStream);
-                    return;
-                } finally {
-                    IOUtils.closeQuietly(resourceAsStream);
-                }
-            } else {
-                throw new FalconException("Unable to find state store credentials file");
-            }
-        } catch (IOException e) {
-            throw new FalconException("Error loading properties file: " + getPropertyFile(), e);
-        }
-    }
-
-    private void loadCredentials(InputStream resourceAsStream) throws IOException {
-        Properties origProps = new Properties();
-        origProps.load(resourceAsStream);
-        LOG.info("Initializing {} properties with domain {}", this.getClass().getName(), domain);
-        Set<String> keys = getKeys(origProps.keySet());
-        for (String key : keys) {
-            String value = origProps.getProperty(domain + "." + key, origProps.getProperty("*." + key));
-            if (value != null) {
-                value = ExpressionHelper.substitute(value);
-                LOG.debug("{}={}", key, value);
-                put(key, value);
-            }
-        }
-    }
-
-
-    public static Properties get() {
-        try {
-            if (INSTANCE.get() == null) {
-                INSTANCE.compareAndSet(null, new StateStoreProperties());
-            }
-            return INSTANCE.get();
-        } catch (FalconException e) {
-            throw new RuntimeException("Unable to read application state store properties", e);
-        }
-    }
-}
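
A standalone sketch of the lookup order used in loadCredentials() above: a domain-scoped key
("<domain>.<key>") takes precedence over the wildcard key ("*.<key>"). The property names are
hypothetical and only illustrate the pattern.

import java.util.Properties;

public class DomainScopedLookupExample {
    public static void main(String[] args) {
        Properties raw = new Properties();
        raw.setProperty("*.statestore.jdbc.username", "default_user");     // hypothetical key
        raw.setProperty("prod.statestore.jdbc.username", "prod_user");     // hypothetical key

        String domain = "prod";
        String key = "statestore.jdbc.username";
        // Same precedence as loadCredentials(): domain-scoped value, else the "*." fallback.
        String value = raw.getProperty(domain + "." + key, raw.getProperty("*." + key));
        System.out.println(key + "=" + value);  // prod_user
    }
}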

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/WorkflowEngineFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowEngineFactory.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowEngineFactory.java
deleted file mode 100644
index c713712..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowEngineFactory.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.lifecycle.AbstractPolicyBuilderFactory;
-import org.apache.falcon.util.ReflectionUtils;
-import org.apache.falcon.workflow.engine.AbstractWorkflowEngine;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-
-/**
- * Factory for providing appropriate workflow engine to the falcon service.
- */
-@SuppressWarnings("unchecked")
-public final class WorkflowEngineFactory {
-
-    private static final Logger LOG = LoggerFactory.getLogger(WorkflowEngineFactory.class);
-    public static final String ENGINE_PROP="falcon.scheduler";
-    private static AbstractWorkflowEngine nativeWorkflowEngine;
-    private static AbstractWorkflowEngine configuredWorkflowEngine;
-    private static final String CONFIGURED_WORKFLOW_ENGINE = "workflow.engine.impl";
-    private static final String LIFECYCLE_ENGINE = "lifecycle.engine.impl";
-
-    private WorkflowEngineFactory() {
-    }
-
-    /**
-     * @param entity
-     * @return The workflow engine using which the entity is scheduled.
-     * @throws FalconException
-     */
-    public static AbstractWorkflowEngine getWorkflowEngine(Entity entity) throws FalconException {
-        // The below check is only for schedulable entities.
-        if (entity != null
-                && entity.getEntityType().isSchedulable() && getNativeWorkflowEngine().isActive(entity)) {
-            LOG.debug("Returning native workflow engine for entity {}", entity.getName());
-            return nativeWorkflowEngine;
-        }
-        LOG.debug("Returning configured workflow engine for entity {}.", entity);
-        return getWorkflowEngine();
-    }
-
-    /**
-     * @param entity
-     * @param props
-     * @return Workflow engine as specified in the props and for a given schedulable entity.
-     * @throws FalconException
-     */
-    public static AbstractWorkflowEngine getWorkflowEngine(Entity entity, Map<String, String> props)
-        throws FalconException {
-        // If the entity is null or not schedulable, return the configured workflow engine.
-        if (entity == null || !entity.getEntityType().isSchedulable()) {
-            LOG.debug("Returning configured workflow engine for entity {}.", entity);
-            return getWorkflowEngine();
-        }
-
-        // Default to configured workflow engine when no properties are specified.
-        String engineName = getWorkflowEngine().getName();
-        if (props != null && props.containsKey(ENGINE_PROP)) {
-            engineName = props.get(ENGINE_PROP);
-        }
-
-        if (engineName.equalsIgnoreCase(getWorkflowEngine().getName())) {
-            // If already active on native
-            if (getNativeWorkflowEngine().isActive(entity)) {
-                throw new FalconException("Entity " + entity.getName() + " is already scheduled on native engine.");
-            }
-            LOG.debug("Returning configured workflow engine for entity {}", entity.getName());
-            return configuredWorkflowEngine;
-        } else if (engineName.equalsIgnoreCase(getNativeWorkflowEngine().getName())) {
-            // If already active on configured workflow engine
-            if (getWorkflowEngine().isActive(entity)) {
-                throw new FalconException("Entity " + entity.getName() + " is already scheduled on "
-                        + "configured workflow engine.");
-            }
-            LOG.debug("Returning native workflow engine for entity {}", entity.getName());
-            return nativeWorkflowEngine;
-        } else {
-            throw new IllegalArgumentException("Property " + ENGINE_PROP + " is not set to a valid value.");
-        }
-    }
-
-    /**
-     * @return An instance of the configured workflow engine.
-     * @throws FalconException
-     */
-    public static AbstractWorkflowEngine getWorkflowEngine() throws FalconException {
-        // Caching is only for optimization, workflow engine doesn't need to be a singleton.
-        if (configuredWorkflowEngine == null) {
-            configuredWorkflowEngine = ReflectionUtils.getInstance(CONFIGURED_WORKFLOW_ENGINE);
-        }
-        return configuredWorkflowEngine;
-    }
-
-    public static AbstractWorkflowEngine getNativeWorkflowEngine() throws FalconException {
-        if (nativeWorkflowEngine  ==  null) {
-            nativeWorkflowEngine =
-                    ReflectionUtils.getInstanceByClassName("org.apache.falcon.workflow.engine.FalconWorkflowEngine");
-        }
-        return nativeWorkflowEngine;
-    }
-
-    public static AbstractPolicyBuilderFactory getLifecycleEngine() throws FalconException {
-        return ReflectionUtils.getInstance(LIFECYCLE_ENGINE);
-    }
-}
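
A minimal sketch, assuming startup.properties defines workflow.engine.impl; the call resolves and
caches the configured engine, and the commented line shows how a specific engine would be requested
for a schedulable entity via the scheduler property.

import java.util.HashMap;
import java.util.Map;

import org.apache.falcon.FalconException;
import org.apache.falcon.workflow.WorkflowEngineFactory;
import org.apache.falcon.workflow.engine.AbstractWorkflowEngine;

public class WorkflowEngineFactoryExample {
    public static void main(String[] args) throws FalconException {
        // Configured engine, resolved via ReflectionUtils once and then cached.
        AbstractWorkflowEngine engine = WorkflowEngineFactory.getWorkflowEngine();
        System.out.println("Configured engine: " + engine.getName());

        // To request a particular engine for a schedulable entity, pass the scheduler property:
        Map<String, String> props = new HashMap<String, String>();
        props.put(WorkflowEngineFactory.ENGINE_PROP, engine.getName());
        // AbstractWorkflowEngine chosen = WorkflowEngineFactory.getWorkflowEngine(entity, props);
    }
}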

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionArgs.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionArgs.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionArgs.java
deleted file mode 100644
index 2171092..0000000
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionArgs.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.workflow;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-
-/**
- * Arguments for workflow execution.
- */
-public enum WorkflowExecutionArgs {
-
-    // instance details
-    NOMINAL_TIME("nominalTime", "instance time"),
-    ENTITY_TYPE("entityType", "type of the entity"),
-    ENTITY_NAME("entityName", "name of the entity"),
-    TIMESTAMP("timeStamp", "current timestamp"),
-
-    // where
-    CLUSTER_NAME("cluster", "name of the current cluster"),
-    OPERATION("operation", "operation like generate, delete, replicate"),
-    // Exactly same as the above. Introduced to ensure compatibility between messages produced by POST-PROCESSING and
-    // the values in conf.
-    DATA_OPERATION("falconDataOperation", "operation like generate, delete, replicate", false),
-    DATASOURCE_NAME("datasource", "name of the datasource", false),
-
-    // who
-    WORKFLOW_USER("workflowUser", "user who owns the feed instance (partition)"),
-
-    // what
-    // workflow details
-    USER_WORKFLOW_ENGINE("userWorkflowEngine", "user workflow engine type", false),
-    USER_WORKFLOW_NAME("userWorkflowName", "user workflow name", false),
-    USER_WORKFLOW_VERSION("userWorkflowVersion", "user workflow version", false),
-
-    // workflow execution details
-    WORKFLOW_ID("workflowId", "current workflow-id of the instance"),
-    RUN_ID("runId", "current run-id of the instance"),
-    STATUS("status", "status of the user workflow isnstance"),
-    WF_ENGINE_URL("workflowEngineUrl", "url of workflow engine server, ex:oozie", false),
-    USER_SUBFLOW_ID("subflowId", "external id of user workflow", false),
-    PARENT_ID("parentId", "The parent of the current workflow, typically coord action", false),
-
-    WF_START_TIME("workflowStartTime", "workflow start time", false),
-    WF_END_TIME("workflowEndTime", "workflow end time", false),
-    WF_DURATION("workflowDuration", "workflow duration", false),
-
-    // what inputs
-    INPUT_FEED_NAMES("falconInputFeeds", "name of the feeds which are used as inputs", false),
-    INPUT_FEED_PATHS("falconInPaths", "comma separated input feed instance paths", false),
-    INPUT_NAMES("falconInputNames", "name of the inputs", false),
-    INPUT_STORAGE_TYPES("falconInputFeedStorageTypes", "input storage types", false),
-
-    // what outputs
-    OUTPUT_FEED_NAMES("feedNames", "name of the feeds which are generated/replicated/deleted"),
-    OUTPUT_FEED_PATHS("feedInstancePaths", "comma separated feed instance paths"),
-
-    // broker related parameters
-    TOPIC_NAME("topicName", "name of the topic to be used to send JMS message", false),
-    BRKR_IMPL_CLASS("brokerImplClass", "falcon message broker Implementation class"),
-    BRKR_URL("brokerUrl", "falcon message broker url"),
-    USER_BRKR_IMPL_CLASS("userBrokerImplClass", "user broker Impl class", false),
-    USER_BRKR_URL("userBrokerUrl", "user broker url", false),
-    BRKR_TTL("brokerTTL", "time to live for broker message in sec", false),
-    USER_JMS_NOTIFICATION_ENABLED("userJMSNotificationEnabled", "Is User notification via JMS enabled?", false),
-    SYSTEM_JMS_NOTIFICATION_ENABLED("systemJMSNotificationEnabled", "Is system notification via JMS enabled?", false),
-
-    // state maintained
-    LOG_FILE("logFile", "log file path where feeds to be deleted are recorded", false),
-    // execution context data recorded
-    LOG_DIR("logDir", "log dir where lineage can be recorded"),
-
-    CONTEXT_FILE("contextFile", "wf execution context file path where wf properties are recorded", false),
-    CONTEXT_TYPE("contextType", "wf execution context type, pre or post processing", false),
-    COUNTERS("counters", "store job counters", false);
-
-    private final String name;
-    private final String description;
-    private final boolean isRequired;
-
-    WorkflowExecutionArgs(String name, String description) {
-        this(name, description, true);
-    }
-
-    WorkflowExecutionArgs(String name, String description, boolean isRequired) {
-        this.name = name;
-        this.description = description;
-        this.isRequired = isRequired;
-    }
-
-    public Option getOption() {
-        return new Option(this.name, true, this.description);
-    }
-
-    public String getName() {
-        return this.name;
-    }
-
-    public String getDescription() {
-        return description;
-    }
-
-    public boolean isRequired() {
-        return isRequired;
-    }
-
-    public String getOptionValue(CommandLine cmd) {
-        return cmd.getOptionValue(this.name);
-    }
-
-    @Override
-    public String toString() {
-        return getName();
-    }
-}
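
A sketch (the sample argument values are made up) of turning the enum into commons-cli Options and
reading parsed values back, which mirrors how the arguments are declared above:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.falcon.workflow.WorkflowExecutionArgs;

public class WorkflowExecutionArgsExample {
    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        for (WorkflowExecutionArgs arg : WorkflowExecutionArgs.values()) {
            options.addOption(arg.getOption());
        }

        // Illustrative values only.
        String[] sample = {"-entityName", "sample-process", "-nominalTime", "2016-03-01T00:00Z"};
        CommandLine cmd = new GnuParser().parse(options, sample);

        System.out.println(WorkflowExecutionArgs.ENTITY_NAME.getOptionValue(cmd));   // sample-process
        System.out.println(WorkflowExecutionArgs.NOMINAL_TIME.getOptionValue(cmd));  // 2016-03-01T00:00Z
    }
}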


[21/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/ImportExport.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/ImportExport.twiki b/docs/src/site/twiki/ImportExport.twiki
deleted file mode 100644
index b0ce7ff..0000000
--- a/docs/src/site/twiki/ImportExport.twiki
+++ /dev/null
@@ -1,242 +0,0 @@
----+Falcon Data Import and Export
-
-
----++Overview
-
-Falcon provides constructs to periodically bring raw data from external data sources (like databases, drop boxes etc)
-onto Hadoop and push derived data computed on Hadoop onto external data sources.
-
-As of this release, Falcon supports only relational databases (e.g. Oracle, MySQL) via JDBC as external data sources.
-Future releases will add support for other external data sources.
-
-
----++Prerequisites
-
-The following are the prerequisites for importing data from and exporting data to external databases.
-
-   * *Sqoop 1.4.6+*
-   * *Oozie 4.2.0+*
-   * *Appropriate database connector*
-
-
-*Note:* Falcon uses Sqoop for import/export operations. Sqoop requires the appropriate database driver to connect to
-the relational database. Please refer to the Sqoop documentation for any Sqoop-related questions. Please make sure
-the database driver jar is copied into the Oozie share lib for Sqoop.
-
-<verbatim>
-For example, in order to import and export with MySQL, please make sure the latest MySQL connector
-mysql-connector-java-5.1.31.jar is copied into oozie's Sqoop share lib
-
-/user/oozie/share/lib/{lib-dir}/sqoop/mysql-connector-java-5.1.31.jar
-
-where {lib-dir} value varies in oozie deployments.
-
-</verbatim>
-
----++ Usage
----+++ Entity Definition and Setup
-   * *Datasource Entity*
-      Datasource entity abstracts connection and credential details to external data sources. The Datasource entity
-      supports read and write interfaces with specific credentials. The default credential will be used if the read
-      or write interface does not have its own credentials. In general, the Datasource entity will be defined by the
-      system administrator. Please refer to the datasource XSD for more details.
-
-      The following example defines a Datasource entity for a MySQL database. The import operation will use
-      the read interface with url "jdbc:mysql://dbhost/test", user name "import_usr" and password text "sqoop".
-      Whereas, the export operation will use the write interface with url "jdbc:mysql://dbhost/test" with user
-      name "export_usr" and password specified in a HDFS file at the location "/user/ambari-qa/password-store/password_write_user".
-
-      The default credential specified will be used if either the read or write interface does not provide its own
-      credentials. The default credential specifies the password using password alias feature available via hadoop credential
-      functionality. Users can create a password alias using the "hadoop credential create <alias> -provider
-      <provider-path>" command, where <alias> is a string and <provider-path> is an HDFS jceks file. At runtime,
-      the specified alias is used to look up the password stored encrypted in the jceks HDFS file specified under
-      the providerPath element.
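-
-      For example, the alias referenced by the datasource below could be created with a command along these lines
-      (the provider URI is illustrative and should point at the same jceks file given in the providerPath element):
-
-      <verbatim>
-      hadoop credential create sqoop.password.alias -provider jceks://hdfs@namenode:8020/user/ambari-qa/sqoop_password.jceks
-      </verbatim>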
-
-      The available read and write interfaces enable database administrators to segregate read and write workloads.
-
-      <verbatim>
-
-      File: mysql-database.xml
-
-      <?xml version="1.0" encoding="UTF-8"?>
-      <datasource colo="west-coast" description="MySQL database on west coast" type="mysql" name="mysql-db" xmlns="uri:falcon:datasource:0.1">
-          <tags>owner=foobar@ambari.apache.org, consumer=phoe@ambari.apache.org</tags>
-          <interfaces>
-              <!-- ***** read interface ***** -->
-              <interface type="readonly" endpoint="jdbc:mysql://dbhost/test">
-                  <credential type="password-text">
-                      <userName>import_usr</userName>
-                      <passwordText>sqoop</passwordText>
-                  </credential>
-              </interface>
-
-              <!-- ***** write interface ***** -->
-              <interface type="write"  endpoint="jdbc:mysql://dbhost/test">
-                  <credential type="password-file">
-                      <userName>export_usr</userName>
-                      <passwordFile>/user/ambari-qa/password-store/password_write_user</passwordFile>
-                  </credential>
-              </interface>
-
-              <!-- *** default credential *** -->
-              <credential type="password-alias">
-                <userName>sqoop2_user</userName>
-                <passwordAlias>
-                    <alias>sqoop.password.alias</alias>
-                    <providerPath>hdfs://namenode:8020/user/ambari-qa/sqoop_password.jceks</providerPath>
-                </passwordAlias>
-              </credential>
-
-          </interfaces>
-
-          <driver>
-              <clazz>com.mysql.jdbc.Driver</clazz>
-              <jar>/user/oozie/share/lib/lib_20150721010816/sqoop/mysql-connector-java-5.1.31</jar>
-          </driver>
-      </datasource>
-      </verbatim>
-
-   * *Feed  Entity*
-      Feed entity now enables users to define IMPORT and EXPORT policies in addition to RETENTION and REPLICATION.
-      The IMPORT and EXPORT policies will refer to an already defined Datasource entity for connection and credential
-      details and take a table name from the policy to operate on. Please refer to feed entity XSD for details.
-
-      The following example defines a Feed entity with IMPORT and EXPORT policies. Both the IMPORT and EXPORT operations
-      refer to a datasource entity "mysql-db". The IMPORT operation will use the read interface and credentials while
-      the EXPORT operation will use the write interface and credentials. A feed instance is created every hour
-      since the frequency of the Feed is hours(1), and the Feed instances are deleted after 90 days because of the
-      retention policy.
-
-
-      <verbatim>
-
-      File: customer_email_feed.xml
-
-      <?xml version="1.0" encoding="UTF-8"?>
-      <!--
-       A feed representing Hourly customer email data retained for 90 days
-       -->
-      <feed description="Raw customer email feed" name="customer_feed" xmlns="uri:falcon:feed:0.1">
-          <tags>externalSystem=USWestEmailServers,classification=secure</tags>
-          <groups>DataImportPipeline</groups>
-          <frequency>hours(1)</frequency>
-          <late-arrival cut-off="hours(4)"/>
-          <clusters>
-              <cluster name="primaryCluster" type="source">
-                  <validity start="2015-12-15T00:00Z" end="2016-03-31T00:00Z"/>
-                  <retention limit="days(90)" action="delete"/>
-                  <import>
-                      <source name="mysql-db" tableName="simple">
-                          <extract type="full">
-                              <mergepolicy>snapshot</mergepolicy>
-                          </extract>
-                          <fields>
-                              <includes>
-                                  <field>id</field>
-                                  <field>name</field>
-                              </includes>
-                          </fields>
-                      </source>
-                      <arguments>
-                          <argument name="--split-by" value="id"/>
-                          <argument name="--num-mappers" value="2"/>
-                      </arguments>
-                  </import>
-                  <export>
-                        <target name="mysql-db" tableName="simple_export">
-                            <load type="insert"/>
-                            <fields>
-                              <includes>
-                                <field>id</field>
-                                <field>name</field>
-                              </includes>
-                            </fields>
-                        </target>
-                        <arguments>
-                             <argument name="--update-key" value="id"/>
-                        </arguments>
-                    </export>
-              </cluster>
-          </clusters>
-
-          <locations>
-              <location type="data" path="/user/ambari-qa/falcon/demo/primary/importfeed/${YEAR}-${MONTH}-${DAY}-${HOUR}-${MINUTE}"/>
-              <location type="stats" path="/none"/>
-              <location type="meta" path="/none"/>
-          </locations>
-
-          <ACL owner="ambari-qa" group="users" permission="0755"/>
-          <schema location="/none" provider="none"/>
-
-      </feed>
-      </verbatim>
-
-   * *Import policy*
-     The import policy uses the datasource entity specified in the "source" to connect to the database. The tableName
-     specified should exist in the source datasource.
-
-     Extraction type specifies whether to pull data from the external datasource in "full" every time or "incrementally".
-     The mergepolicy specifies how to organize (snapshot or append, i.e. time-series partitions) the data on hadoop.
-     The valid combinations are:
-      * [full, snapshot] - data is extracted in full and dumped into the feed instance location.
-      * [incremental, append] - data is extracted incrementally using the key specified in the *deltacolumn*
-        and added as a partition to the feed instance location.
-      * [incremental, snapshot] - data is extracted incrementally and merged with already existing data on hadoop to
-        produce one latest feed instance. *This feature is not supported currently*. The use case for this feature is
-        to efficiently import very large dimension tables that have updates and inserts onto hadoop and make them available
-        as a snapshot with the latest updates to consumers.
-
-      The following example defines an incremental extraction with append organization:
-
-      <verbatim>
-           <import>
-                <source name="mysql-db" tableName="simple">
-                    <extract type="incremental">
-                        <deltacolumn>modified_time</deltacolumn>
-                        <mergepolicy>append</mergepolicy>
-                    </extract>
-                    <fields>
-                        <includes>
-                            <field>id</field>
-                            <field>name</field>
-                        </includes>
-                    </fields>
-                </source>
-                <arguments>
-                    <argument name="--split-by" value="id"/>
-                    <argument name="--num-mappers" value="2"/>
-                </arguments>
-            </import>
-        </verbatim>
-
-
-     The fields option enables users to control which fields get imported. By default, all fields get imported. The "includes" option
-     brings in only those fields specified. The "excludes" option brings in all the fields other than those specified.
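-
-     For example, a hypothetical variation of the import above that brings in every column except "name" would use
-     "excludes" instead of "includes":
-
-     <verbatim>
-          <fields>
-              <excludes>
-                  <field>name</field>
-              </excludes>
-          </fields>
-     </verbatim>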
-
-     The arguments section enables users to pass in any extra arguments needed for fine control over the underlying implementation --
-     in this case, Sqoop.
-
-   * *Export policy*
-     The export, like import, uses the datasource for connecting to the database. Load type specifies whether to insert
-     or only update data onto the external table. Fields option behaves the same way as in import policy.
-     The tableName specified should exist in the external datasource.
-
----+++ Operation
-   Once the Datasource and Feed entities with import and export policies are defined, users can submit and schedule
-   the Import and Export operations via the CLI and REST API as shown below:
-
-   <verbatim>
-
-    ## submit the mysql-db datasource defined in the file mysql_datasource.xml
-    falcon entity -submit -type datasource -file mysql_datasource.xml
-
-    ## submit the customer_feed specified in the customer_email_feed.xml
-    falcon entity -submit -type feed -file customer_email_feed.xml
-
-    ## schedule the customer_feed
-    falcon entity -schedule -type feed -name customer_feed
-
-   </verbatim>
-
-   Falcon will create the corresponding Oozie bundles with coordinator and workflow for the import and export operations.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/InstallationSteps.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/InstallationSteps.twiki b/docs/src/site/twiki/InstallationSteps.twiki
deleted file mode 100644
index a5ee2cc..0000000
--- a/docs/src/site/twiki/InstallationSteps.twiki
+++ /dev/null
@@ -1,87 +0,0 @@
----+Building & Installing Falcon
-
-
----++Building Falcon
-
----+++Prerequisites
-
-   * JDK 1.7/1.8
-   * Maven 3.2.x
-
-
-
----+++Step 1 - Clone the Falcon repository
-
-<verbatim>
-$git clone https://git-wip-us.apache.org/repos/asf/falcon.git falcon
-</verbatim>
-
-
----+++Step 2 - Build Falcon
-
-<verbatim>
-$cd falcon
-$export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m -noverify" && mvn clean install
-</verbatim>
-It builds and installs the package into the local repository, for use as a dependency in other projects locally.
-
-[optionally -Dhadoop.version=<<hadoop.version>> can be appended to build for a specific version of Hadoop]
-
-*NOTE:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards
-[optionally -Doozie.version=<<oozie version>> can be appended to build with a specific version of Oozie. Oozie versions
->= 4 are supported]
-*NOTE:* Falcon builds with JDK 1.7/1.8 using the -noverify option.
-      To compile Falcon with Hive Replication, optionally "-P hadoop-2,hivedr" can be appended. For this, Hive >= 1.2.0
-      and Oozie >= 4.2.0 should be available.
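-
-For example, a build against specific Hadoop and Oozie versions with Hive replication enabled might look like this
-(the version numbers are illustrative):
-<verbatim>
-$mvn clean install -Dhadoop.version=2.6.0 -Doozie.version=4.2.0 -P hadoop-2,hivedr
-</verbatim>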
-
-
-
----+++Step 3 - Package and Deploy Falcon
-
-Once the build successfully completes, artifacts can be packaged for deployment using the assembly plugin. The Assembly
-Plugin for Maven is primarily intended to allow users to aggregate the project output along with its dependencies,
-modules, site documentation, and other files into a single distributable archive. There are two basic ways in which you
-can deploy Falcon - Embedded mode (also known as Stand Alone Mode) and Distributed mode. Your next steps will vary based
-on the mode in which you want to deploy Falcon.
-
-*NOTE* : Falcon extends Oozie (particularly its EL extensions), hence the need for Falcon to build and
-re-package Oozie, so that users of Falcon can work with the right Oozie setup. Though Oozie is packaged by Falcon, it
-needs to be deployed separately by the administrator and is not auto deployed along with Falcon.
-
-
----++++Embedded/Stand Alone Mode
-Embedded mode is useful when the Hadoop jobs and relevant data processing involve only one Hadoop cluster. In this mode
- there is a single Falcon server that contacts the scheduler to schedule jobs on Hadoop. All the process/feed requests
- like submit, schedule, suspend, kill etc. are sent to this server. For running Falcon in this mode one should use the
- Falcon which has been built using standalone option. You can find the instructions for Embedded mode setup
- [[Embedded-mode][here]].
-
-
----++++Distributed Mode
-Distributed mode is for multiple (colos) instances of Hadoop clusters, and multiple workflow schedulers to handle them.
-In this mode Falcon has 2 components: Prism and Server(s). Both Prism and Server(s) have their own config
-locations (startup and runtime properties). In this mode Prism acts as a contact point for Falcon servers. While
- all commands are available through Prism, only read and instance APIs are available through the Server. You can find the
- instructions for Distributed Mode setup [[Distributed-mode][here]].
-
-
-
----+++Preparing Oozie and Falcon packages for deployment
-<verbatim>
-$cd <<project home>>
-$src/bin/package.sh <<hadoop-version>> <<oozie-version>>
-
->> ex. src/bin/package.sh 1.1.2 4.0.1 or src/bin/package.sh 0.20.2-cdh3u5 4.0.1
->> ex. src/bin/package.sh 2.5.0 4.0.0
->> Falcon package is available in <<falcon home>>/target/apache-falcon-<<version>>-bin.tar.gz
->> Oozie package is available in <<falcon home>>/target/oozie-4.0.1-distro.tar.gz
-</verbatim>
-
-*NOTE:* If you have a separate Apache Oozie installation, you will need to follow some additional steps:
-   1. Once you have setup the Falcon Server, copy libraries under {falcon-server-dir}/oozie/libext/ to {oozie-install-dir}/libext.
-   1. Modify Oozie's configuration file. Copy all Falcon related properties from {falcon-server-dir}/oozie/conf/oozie-site.xml to {oozie-install-dir}/conf/oozie-site.xml
-   1. Restart oozie:
-      1. cd {oozie-install-dir}
-      1. sudo -u oozie ./bin/oozie-stop.sh
-      1. sudo -u oozie ./bin/oozie-setup.sh prepare-war
-      1. sudo -u oozie ./bin/oozie-start.sh

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/LICENSE.txt b/docs/src/site/twiki/LICENSE.txt
deleted file mode 100644
index d3b580f..0000000
--- a/docs/src/site/twiki/LICENSE.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-All files in this directory and subdirectories are under Apache License Version 2.0.
-The reason being Maven Doxia plugin that converts twiki to html does not have
-commenting out feature.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/MigrationInstructions.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/MigrationInstructions.twiki b/docs/src/site/twiki/MigrationInstructions.twiki
deleted file mode 100644
index 7c0e027..0000000
--- a/docs/src/site/twiki/MigrationInstructions.twiki
+++ /dev/null
@@ -1,15 +0,0 @@
----+ Migration Instructions
-
----++ Migrate from 0.5-incubating to 0.6-incubating
-
-This is a placeholder wiki for migration instructions from falcon 0.5-incubating to 0.6-incubating.
-
----+++ Update Entities
-
----+++ Change cluster dir permissions
-
----+++ Enable/Disable TLS
-
----+++ Authorization
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/OnBoarding.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/OnBoarding.twiki b/docs/src/site/twiki/OnBoarding.twiki
deleted file mode 100644
index 8b02150..0000000
--- a/docs/src/site/twiki/OnBoarding.twiki
+++ /dev/null
@@ -1,269 +0,0 @@
----++ Contents
-   * <a href="#Onboarding Steps">Onboarding Steps</a>
-   * <a href="#Sample Pipeline">Sample Pipeline</a>
-   * [[HiveIntegration][Hive Examples]]
-
----+++ Onboarding Steps
-   * Create cluster definition for the cluster, specifying name node, job tracker, workflow engine endpoint, messaging endpoint. Refer to [[EntitySpecification][cluster definition]] for details.
-   * Create Feed definitions for each of the input and output specifying frequency, data path, ownership. Refer to [[EntitySpecification][feed definition]] for details.
-   * Create Process definition for your job. Process defines configuration for the workflow job. Important attributes are frequency, inputs/outputs and workflow path. Refer to [[EntitySpecification][process definition]] for process details.
-   * Define workflow for your job using the workflow engine(only oozie is supported as of now). Refer [[http://oozie.apache.org/docs/3.1.3-incubating/WorkflowFunctionalSpec.html][Oozie Workflow Specification]]. The libraries required for the workflow should be available in lib folder in workflow path.
-   * Set-up workflow definition, libraries and referenced scripts on hadoop. 
-   * Submit cluster definition
-   * Submit and schedule feed and process definitions
-   
-
----+++ Sample Pipeline
----++++ Cluster   
-Cluster definition that contains end points for name node, job tracker, oozie and jms server:
-The cluster locations MUST be created prior to submitting a cluster entity to Falcon.
-*staging* must have 777 permissions and the parent dirs must have execute permissions
-*working* must have 755 permissions and the parent dirs must have execute permissions
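-
-For example, the sample cluster below could be prepared as follows (run as an HDFS admin; the paths match the
-cluster definition and the permissions match the requirements above):
-<verbatim>
-hadoop fs -mkdir -p /projects/falcon/staging /projects/falcon/working
-hadoop fs -chmod 777 /projects/falcon/staging
-hadoop fs -chmod 755 /projects/falcon/working
-</verbatim>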
-
-<verbatim>
-<?xml version="1.0"?>
-<!--
-    Cluster configuration
-  -->
-<cluster colo="ua2" description="" name="corp" xmlns="uri:falcon:cluster:0.1"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">    
-    <interfaces>
-        <interface type="readonly" endpoint="hftp://name-node.com:50070" version="2.5.0" />
-
-        <interface type="write" endpoint="hdfs://name-node.com:54310" version="2.5.0" />
-
-        <interface type="execute" endpoint="job-tracker:54311" version="2.5.0" />
-
-        <interface type="workflow" endpoint="http://oozie.com:11000/oozie/" version="4.0.1" />
-
-        <interface type="messaging" endpoint="tcp://jms-server.com:61616?daemon=true" version="5.1.6" />
-    </interfaces>
-
-    <locations>
-        <location name="staging" path="/projects/falcon/staging" />
-        <location name="temp" path="/tmp" />
-        <location name="working" path="/projects/falcon/working" />
-    </locations>
-</cluster>
-</verbatim>
-   
----++++ Input Feed
-Hourly feed that defines feed path, frequency, ownership and validity:
-<verbatim>
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-    Hourly sample input data
-  -->
-
-<feed description="sample input data" name="SampleInput" xmlns="uri:falcon:feed:0.1"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <groups>group</groups>
-
-    <frequency>hours(1)</frequency>
-
-    <late-arrival cut-off="hours(6)" />
-
-    <clusters>
-        <cluster name="corp" type="source">
-            <validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" timezone="UTC" />
-            <retention limit="months(24)" action="delete" />
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/bootcamp/data/${YEAR}-${MONTH}-${DAY}-${HOUR}/SampleInput" />
-        <location type="stats" path="/projects/bootcamp/stats/SampleInput" />
-        <location type="meta" path="/projects/bootcamp/meta/SampleInput" />
-    </locations>
-
-    <ACL owner="suser" group="users" permission="0755" />
-
-    <schema location="/none" provider="none" />
-</feed>
-</verbatim>
-
----++++ Output Feed
-Daily feed that defines feed path, frequency, ownership and validity:
-<verbatim>
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-    Daily sample output data
-  -->
-
-<feed description="sample output data" name="SampleOutput" xmlns="uri:falcon:feed:0.1"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-    <groups>group</groups>
-
-    <frequency>days(1)</frequency>
-
-    <late-arrival cut-off="hours(6)" />
-
-    <clusters>
-        <cluster name="corp" type="source">
-            <validity start="2009-01-01T00:00Z" end="2099-12-31T00:00Z" timezone="UTC" />
-            <retention limit="months(24)" action="delete" />
-        </cluster>
-    </clusters>
-
-    <locations>
-        <location type="data" path="/projects/bootcamp/output/${YEAR}-${MONTH}-${DAY}/SampleOutput" />
-        <location type="stats" path="/projects/bootcamp/stats/SampleOutput" />
-        <location type="meta" path="/projects/bootcamp/meta/SampleOutput" />
-    </locations>
-
-    <ACL owner="suser" group="users" permission="0755" />
-
-    <schema location="/none" provider="none" />
-</feed>
-</verbatim>
-
----++++ Process
-Sample process which runs daily at the 6th hour on the corp cluster. It takes one input - !SampleInput for the previous day (24 instances). It generates one output - !SampleOutput for the previous day. The workflow is defined at /projects/bootcamp/workflow/workflow.xml. Any libraries available for the workflow should be at /projects/bootcamp/workflow/lib. The process also defines properties queueName, ssh.host, and fileTimestamp which are passed to the workflow. In addition, Falcon exposes the following properties to the workflow: nameNode, jobTracker (hadoop properties), input and output (Input/Output properties).
-
-<verbatim>
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-    Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday
- -->
-<process name="SampleProcess">
-    <cluster name="corp" />
-
-    <frequency>days(1)</frequency>
-
-    <validity start="2012-04-03T06:00Z" end="2022-12-30T00:00Z" timezone="UTC" />
-
-    <inputs>
-        <input name="input" feed="SampleInput" start="yesterday(0,0)" end="today(-1,0)" />
-    </inputs>
-
-    <outputs>
-            <output name="output" feed="SampleOutput" instance="yesterday(0,0)" />
-    </outputs>
-
-    <properties>
-        <property name="queueName" value="reports" />
-        <property name="ssh.host" value="host.com" />
-        <property name="fileTimestamp" value="${coord:formatTime(coord:nominalTime(), 'yyyy-MM-dd')}" />
-    </properties>
-
-    <workflow engine="oozie" path="/projects/bootcamp/workflow" />
-
-    <retry policy="periodic" delay="minutes(5)" attempts="3" />
-    
-    <late-process policy="exp-backoff" delay="hours(1)">
-        <late-input input="input" workflow-path="/projects/bootcamp/workflow/lateinput" />
-    </late-process>
-</process>
-</verbatim>
-
----++++ Oozie Workflow
-The sample user workflow contains 3 actions:
-   * Pig action - Executes pig script /projects/bootcamp/workflow/script.pig
-   * concatenator - Java action that concatenates part files and generates a single file
-   * file upload - ssh action that gets the concatenated file from hadoop and sends the file to a remote host
-   
-<verbatim>
-<workflow-app xmlns="uri:oozie:workflow:0.2" name="sample-wf">
-        <start to="pig" />
-
-        <action name="pig">
-                <pig>
-                        <job-tracker>${jobTracker}</job-tracker>
-                        <name-node>${nameNode}</name-node>
-                        <prepare>
-                                <delete path="${output}"/>
-                        </prepare>
-                        <configuration>
-                                <property>
-                                        <name>mapred.job.queue.name</name>
-                                        <value>${queueName}</value>
-                                </property>
-                                <property>
-                                        <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-                                        <value>true</value>
-                                </property>
-                        </configuration>
-                        <script>${nameNode}/projects/bootcamp/workflow/script.pig</script>
-                        <param>input=${input}</param>
-                        <param>output=${output}</param>
-                        <file>lib/dependent.jar</file>
-                </pig>
-                <ok to="concatenator" />
-                <error to="fail" />
-        </action>
-
-        <action name="concatenator">
-                <java>
-                        <job-tracker>${jobTracker}</job-tracker>
-                        <name-node>${nameNode}</name-node>
-                        <prepare>
-                                <delete path="${nameNode}/projects/bootcamp/concat/data-${fileTimestamp}.csv"/>
-                        </prepare>
-                        <configuration>
-                                <property>
-                                        <name>mapred.job.queue.name</name>
-                                        <value>${queueName}</value>
-                                </property>
-                        </configuration>
-                        <main-class>com.wf.Concatenator</main-class>
-                        <arg>${output}</arg>
-                        <arg>${nameNode}/projects/bootcamp/concat/data-${fileTimestamp}.csv</arg>
-                </java>
-                <ok to="fileupload" />
-                <error to="fail"/>
-        </action>
-                        
-        <action name="fileupload">
-                <ssh>
-                        <host>localhost</host>
-                        <command>/tmp/fileupload.sh</command>
-                        <args>${nameNode}/projects/bootcamp/concat/data-${fileTimestamp}.csv</args>
-                        <args>${wf:conf("ssh.host")}</args>
-                        <capture-output/>
-                </ssh>
-                <ok to="fileUploadDecision" />
-                <error to="fail"/>
-        </action>
-
-        <decision name="fileUploadDecision">
-                <switch>
-                        <case to="end">
-                                ${wf:actionData('fileupload')['output'] == '0'}
-                        </case>
-                        <default to="fail"/>
-                </switch>
-        </decision>
-
-        <kill name="fail">
-                <message>Workflow failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-        </kill>
-
-        <end name="end" />
-</workflow-app>
-</verbatim>
-
----++++ File Upload Script
-The script gets the file from hadoop, rsyncs the file to /tmp on the remote host and deletes the file from hadoop.
-<verbatim>
-#!/bin/bash
-
-trap 'echo "output=$?"; exit $?' ERR INT TERM
-
-echo "Arguments: $@"
-# The ssh action passes two arguments: the source file on HDFS and the destination host
-SRCFILE=$1
-DESTHOST=$2
-
-FILENAME=`basename $SRCFILE`
-rm -f /tmp/$FILENAME
-hadoop fs -copyToLocal $SRCFILE /tmp/
-echo "Copied $SRCFILE to /tmp"
-
-rsync -ztv --rsh=ssh --stats /tmp/$FILENAME $DESTHOST:/tmp
-echo "rsynced $FILENAME to $DESTUSER@$DESTHOST:$DESTFILE"
-
-hadoop fs -rmr $SRCFILE
-echo "Deleted $SRCFILE"
-
-rm -f /tmp/$FILENAME
-echo "output=0"
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Operability.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Operability.twiki b/docs/src/site/twiki/Operability.twiki
deleted file mode 100644
index 05850c1..0000000
--- a/docs/src/site/twiki/Operability.twiki
+++ /dev/null
@@ -1,110 +0,0 @@
----+ Operationalizing Falcon
-
----++ Overview
-
-Apache Falcon provides various tools to operationalize Falcon consisting of Alerts for
-unrecoverable errors, Audits of user actions, Metrics, and Notifications. They are detailed below.
-
----++ Lineage
-
-Currently Lineage has no way to access or restore information about entity instances created during the time lineage
-was disabled. Information about entities, however, is preserved and bootstrapped when lineage is enabled. If you have to
-reset the graph db then you can delete the graph db files as specified in the startup.properties and restart Falcon.
-Please note: you will lose all the information about the instances if you delete the graph db.
-
----++ Monitoring
-
-Falcon provides monitoring of various events by capturing metrics of those events.
-The metric numbers can then be used to monitor performance and health of the Falcon system and
-the entire processing pipelines.
-
-Falcon also exposes [[https://github.com/thinkaurelius/titan/wiki/Titan-Performance-and-Monitoring][metrics for titandb]]
-
-Users can view the logs of these events in the metric.log file; by default this file is created
-under the ${user.dir}/logs/ directory. Users may also extend the Falcon monitoring framework to send
-events to systems like Mondemand/lwes by implementing the org.apache.falcon.plugin.MonitoringPlugin
-interface.
-
-The following events are captured by Falcon for logging the metrics:
-   1. New cluster definitions posted to Falcon (success & failures)
-   1. New feed definition posted to Falcon (success & failures)
-   1. New process definition posted to Falcon (success & failures)
-   1. Process update events (success & failures)
-   1. Feed update events (success & failures)
-   1. Cluster update events (success & failures)
-   1. Process suspend events (success & failures)
-   1. Feed suspend events (success & failures)
-   1. Process resume events (success & failures)
-   1. Feed resume events (success & failures)
-   1. Process remove events (success & failures)
-   1. Feed remove events (success & failures)
-   1. Cluster remove events (success & failures)
-   1. Process instance kill events (success & failures)
-   1. Process instance re-run events (success & failures)
-   1. Process instance generation events
-   1. Process instance failure events
-   1. Process instance auto-retry events
-   1. Process instance retry exhaust events
-   1. Feed instance deletion event
-   1. Feed instance deletion failure event (no retries)
-   1. Feed instance replication event
-   1. Feed instance replication failure event
-   1. Feed instance replication auto-retry event
-   1. Feed instance replication retry exhaust event
-   1. Feed instance late arrival event
-   1. Feed instance post cut-off arrival event
-   1. Process re-run due to late feed event
-   1. Transaction rollback failed event
-
-The metric logged for an event has the following properties:
-   1. Action - Name of the event.
-   2. Dimensions - A list of name/value pairs of various attributes for a given action.
-   3. Status - Status of an action: FAILED/SUCCEEDED.
-   4. Time-taken - Time taken in nanoseconds for a given action.
-
-An example for an event logged for a submit of a new process definition:
-
-   2012-05-04 12:23:34,026 {Action:submit, Dimensions:{entityType=process}, Status: SUCCEEDED, Time-taken:97087000 ns}
-
-Users may parse the metric.log or capture these events from custom monitoring frameworks and can plot various graphs
-or send alerts according to their requirements.
-
-
----++ Notifications
-
-Falcon creates a JMS topic for every process/feed that is scheduled in Falcon.
-The implementation class and the broker url of the JMS engine are read from the dependent cluster's definition.
-Users may register consumers on the required topic to check the availability or status of feed instances.
-
-For a given process that is scheduled, the name of the topic is same as the process name.
-Falcon sends a Map message for every feed produced by the instance of a process to the JMS topic.
-The JMS !MapMessage sent to a topic has the following properties:
-entityName, feedNames, feedInstancePath, workflowId, runId, nominalTime, timeStamp, brokerUrl, brokerImplClass, entityType, operation, logFile, topicName, status, brokerTTL;
-
-For a given feed that is scheduled, the name of the topic is same as the feed name.
-Falcon sends a map message for every feed instance that is deleted/archived/replicated depending upon the retention policy set in the feed definition.
-The JMS !MapMessage sent to a topic has the following properties:
-entityName, feedNames, feedInstancePath, workflowId, runId, nominalTime, timeStamp, brokerUrl, brokerImplClass, entityType, operation, logFile, topicName, status, brokerTTL;
-
-The JMS messages are automatically purged after a certain period (default 3 days) by the Falcon JMS house-keeping service. The TTL (Time-to-live) for JMS messages
-can be configured in Falcon's startup.properties file.
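-
-The following is a minimal, illustrative sketch of such a consumer using the plain JMS API with ActiveMQ. The broker
-url and the process name "SampleProcess" are assumptions for the example, and the message keys used come from the
-property list above:
-
-<verbatim>
-import javax.jms.Connection;
-import javax.jms.MapMessage;
-import javax.jms.MessageConsumer;
-import javax.jms.Session;
-import org.apache.activemq.ActiveMQConnectionFactory;
-
-public class FalconTopicListener {
-    public static void main(String[] args) throws Exception {
-        // Connect to the broker configured in the cluster's messaging interface (illustrative url)
-        Connection connection = new ActiveMQConnectionFactory("tcp://localhost:61616").createConnection();
-        connection.start();
-        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
-        // Per the description above, the topic name is the same as the scheduled process/feed name
-        MessageConsumer consumer = session.createConsumer(session.createTopic("SampleProcess"));
-        // Block until Falcon publishes the next message, then print a couple of its properties
-        MapMessage message = (MapMessage) consumer.receive();
-        System.out.println(message.getString("feedInstancePath") + " -> " + message.getString("status"));
-        connection.close();
-    }
-}
-</verbatim>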
-
-
----++ Alerts
-
-Falcon generates alerts for unrecoverable errors into a log file by default.
-Users can view these alerts in the alerts.log file; by default this file is created
-under the ${user.dir}/logs/ directory.
-
-Users may also extend the Falcon Alerting plugin to send events to systems like Nagios, etc. by
-extending org.apache.falcon.plugin.AlertingPlugin interface.
-
-
----++ Audits
-
-Falcon audits all user activity and captures them into a log file by default.
-Users can view these audits in the audit.log file; by default this file is created
-under the ${user.dir}/logs/ directory.
-
-Users may also extend the Falcon Audit plugin to send audits to systems like Apache Argus, etc. by
-extending org.apache.falcon.plugin.AuditingPlugin interface.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Recipes.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Recipes.twiki b/docs/src/site/twiki/Recipes.twiki
deleted file mode 100644
index f6011c4..0000000
--- a/docs/src/site/twiki/Recipes.twiki
+++ /dev/null
@@ -1,85 +0,0 @@
----+ Falcon Recipes
-
----++ Overview
-
-A Falcon recipe is a static process template with parameterized workflow to realize a specific use case. Recipes are
-defined in user space. Recipes will not have support for update or lifecycle management.
-
-For example:
-
-   * Replicating directories from one HDFS cluster to another (not timed partitions)
-   * Replicating hive metadata (database, table, views, etc.)
-   * Replicating between HDFS and Hive - either way
-   * Data masking etc.
-
----++ Proposal
-
-Falcon provides a Process abstraction that encapsulates the configuration for a user workflow with scheduling
-controls. All recipes can be modeled as a Process within Falcon which executes the user workflow periodically. The
-process and its associated workflow are parameterized. The user will provide a properties file with name value pairs
-that are substituted by falcon before scheduling it. Falcon translates these recipes as a process entity by
-replacing the parameters in the workflow definition.
-
----++ Falcon CLI recipe support
-
-Falcon CLI functionality to support recipes has been added.
-<a href="./FalconCLI.html">Recipe command usage is defined here.</a>
-
-CLI accepts recipe option with a recipe name and optional tool and does the following:
-   * Validates the options; name option is mandatory and tool is optional and should be provided if user wants to override the base recipe tool
-   * Looks for <name>-workflow.xml, <name>-template.xml and <name>.properties file in the path specified by falcon.recipe.path in client.properties. If files cannot be found then Falcon CLI will fail
-   * Invokes a Tool to substitute the properties in the templated process for the recipe. By default invokes base tool if tool option is not passed. Tool is responsible for generating process entity at the path specified by FalconCLI
-   * Validates the generated entity
-   * Submit and schedule this entity
-   * Generated process entity files are stored in tmp directory
-
----++ Base Recipe tool
-
-Falcon provides a base tool that recipes can override. Base Recipe tool does the following:
-   * Expects recipe template file path, recipe properties file path and path where process entity to be submitted should be generated. Validates these arguments
-   * Validates that the artifacts, i.e. workflow and/or lib files specified in the recipe template, exist on the local filesystem or HDFS at the specified path, else returns an error
-   * Copies the artifacts if they exist on the local filesystem
-      * If the workflow is on the local FS then falcon.recipe.workflow.path in the recipe property file is mandatory for it to be copied to HDFS. If the templated process requires custom libs, the falcon.recipe.workflow.lib.path property is mandatory for them to be copied from the local FS to HDFS. The recipe tool will copy the local artifacts only if these properties are set in the properties file (see the illustrative snippet after this list)
-   * Looks for the pattern ##[A-Za-z0-9_.]*## in the templated process and substitutes it with the properties. The process entity generated after the substitution is written to the empty file passed by FalconCLI
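-
-For illustration, a recipe properties file carrying these paths might contain entries like the following (the values
-are hypothetical):
-
-<verbatim>
-falcon.recipe.workflow.path=/apps/falcon/recipes/hdfs-dr/workflow.xml
-falcon.recipe.workflow.lib.path=/apps/falcon/recipes/hdfs-dr/lib
-</verbatim>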
-
----++ Recipe template file format
-
-   * Any templatized string should be in the format ##[A-Za-z0-9_.]*##.
-   * There should be a corresponding entry in the recipe properties file "falcon.recipe.<templatized-string> = <value to be substituted>"
-
-<verbatim>
-Example: If the entry in recipe template is <workflow name="##workflow.name##"> there should be a corresponding entry in the recipe properties file falcon.recipe.workflow.name=hdfs-dr-workflow
-</verbatim>
-
----++ Recipe properties file format
-
-   * Regular key value pair properties file
-   * Property key should be prefixed by "falcon.recipe."
-
-<verbatim>
-Example: falcon.recipe.workflow.name=hdfs-dr-workflow
-Recipe template will have <workflow name="##workflow.name##">. Recipe tool will look for the pattern ##workflow.name##
-and replace it with the property value "hdfs-dr-workflow". Substituted template will have <workflow name="hdfs-dr-workflow">
-</verbatim>
-
----++ Metrics
-HDFS DR and Hive DR recipes will capture the replication metrics like TIMETAKEN, BYTESCOPIED, COPY (number of files copied) for an
-instance and populate them into the GraphDB.
-
----++ Managing the scheduled recipe process
-   * Scheduled recipe process is similar to regular process
-      * List : falcon entity -type process -name <recipe-process-name> -list
-      * Status : falcon entity -type process -name <recipe-process-name> -status
-      * Delete : falcon entity -type process -name <recipe-process-name> -delete
-
----++ Sample recipes
-
-   * Sample recipes are published in addons/recipes
-
----++ Types of recipes
-   * [[HDFSDR][HDFS Recipe]]
-   * [[HiveDR][HiveDR Recipe]]
-
----++ Packaging
-
-   * There is no packaging for recipes at this time but it will be added soon.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Security.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Security.twiki b/docs/src/site/twiki/Security.twiki
deleted file mode 100644
index 8955bdc..0000000
--- a/docs/src/site/twiki/Security.twiki
+++ /dev/null
@@ -1,387 +0,0 @@
----+ Securing Falcon
-
----++ Overview
-
-Apache Falcon enforces authentication and authorization which are detailed below. Falcon also
-provides transport level security ensuring data confidentiality and integrity.
-
-
----++ Authentication (User Identity)
-
-Apache Falcon enforces authentication on protected resources. Once authentication has been established it sets a
-signed HTTP Cookie that contains an authentication token with the user name, user principal,
-authentication type and expiration time.
-
-It does so by using [[http://hadoop.apache.org/docs/current/hadoop-auth/index.html][Hadoop Auth]].
-Hadoop Auth is a Java library consisting of client and server components to enable Kerberos SPNEGO authentication
-for HTTP. Hadoop Auth also supports additional authentication mechanisms on the client and the server side via 2
-simple interfaces.
-
-
----+++ Authentication Methods
-
-It supports 2 authentication methods, simple and kerberos, out of the box.
-
----++++ Pseudo/Simple Authentication
-
-Falcon authenticates the user by simply trusting the value of the query string parameter 'user.name'. This is the
-default mode Falcon is configured with.
-
----++++ Kerberos Authentication
-
-Falcon uses HTTP Kerberos SPNEGO to authenticate the user.
-
-
----++ Authorization
-
-Falcon also enforces authorization on Entities using ACLs (Access Control Lists). ACLs are useful
-for implementing permission requirements and provide a way to set different permissions for
-specific users or named groups.
-
-By default, support for authorization is disabled and can be enabled in startup.properties.
-
----+++ ACLs in Entity
-
-All Entities now have ACL which needs to be present if authorization is enabled. Only owners who
-own or created the entity will be allowed to update or delete their entities.
-
-An entity has ACLs (Access Control Lists) that are useful for implementing permission requirements
-and provide a way to set different permissions for specific users or named groups.
-<verbatim>
-    <ACL owner="test-user" group="test-group" permission="*"/>
-</verbatim>
-ACL indicates the Access Control List for this entity.
-owner is the owner of this entity.
-group is the group which has read access.
-permission indicates the rwx permissions, which are not enforced at this time.
-
----+++ Super-User
-
-The super-user is the user with the same identity as the falcon process itself. Loosely, if you
-started falcon, then you are the super-user. The super-user can do anything, in that
-permission checks never fail for the super-user. There is no persistent notion of who the
-super-user was; when falcon is started, the process identity determines who the super-user is
-for now. The Falcon super-user does not have to be the super-user of the falcon host, nor is it
-necessary that all clusters have the same super-user. Also, an experimenter running Falcon on a
-personal workstation conveniently becomes that installation's super-user without any configuration.
-
-Falcon also allows users to configure a super user group and allows users belonging to this
-group to be a super user.
-
-ACL owner and group must be valid even if the authenticated user is a super-user.
-
----+++ Group Memberships
-
-Once a user has been authenticated and a username has been determined, the list of groups is
-determined by a group mapping service, configured by the hadoop.security.group.mapping property
-in Hadoop. The default implementation, org.apache.hadoop.security.ShellBasedUnixGroupsMapping,
-will shell out to the Unix bash -c groups command to resolve a list of groups for a user.
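-
-For example, this default corresponds to the following Hadoop configuration entry (shown here only for illustration):
-<verbatim>
-<property>
-    <name>hadoop.security.group.mapping</name>
-    <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
-</property>
-</verbatim>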
-
-Note that Falcon stores the user and group of an Entity as strings; there is no
-conversion from user and group identity numbers as is conventional in Unix.
-
-The only limitation is that a user cannot add a group in ACL that he does not belong to.
-
----+++ Authorization Provider
-
-Falcon provides a plugin-able provider interface for Authorization. It also ships with a default
-implementation that enforces the following authorization policy.
-
----++++ Entity and Instance Management Operations Policy
-
-   * All Entity and Instance operations are authorized for users who created them, Owners and users with group memberships
-   * Reference to entities within a feed or process is allowed without enforcing permissions
-
-Any Feed or Process can refer to a Cluster entity not owned by the Feed or Process owner. Any Process can refer to a Feed entity not owned by the Process owner
-
-The authorization is enforced in the following way:
-
-   * if admin resource,
-      * If authenticated user name matches the admin users configuration
-      * Else if groups of the authenticated user matches the admin groups configuration
-      * Else authorization exception is thrown
-   * Else if entities or instance resource
-      * If the authenticated user matches the owner in ACL for the entity
-      * Else if the groups of the authenticated user matches the group in ACL for the entity
-      * Else authorization exception is thrown
-   * Else if lineage resource
-      * All have read-only permissions, reason being folks should be able to examine the dependency and allow reuse
-
-To authenticate users for REST API calls, the user should append "user.name=<username>" to the query.
-
-*operations on Entity Resource*
-
-| *Resource*                                                                          | *Description*                      | *Authorization* |
-| [[restapi/EntityValidate][api/entities/validate/:entity-type]]                      | Validate the entity                | Owner/Group     |
-| [[restapi/EntitySubmit][api/entities/submit/:entity-type]]                          | Submit the entity                  | Owner/Group     |
-| [[restapi/EntityUpdate][api/entities/update/:entity-type/:entity-name]]             | Update the entity                  | Owner/Group     |
-| [[restapi/EntitySubmitAndSchedule][api/entities/submitAndSchedule/:entity-type]]    | Submit & Schedule the entity       | Owner/Group     |
-| [[restapi/EntitySchedule][api/entities/schedule/:entity-type/:entity-name]]         | Schedule the entity                | Owner/Group     |
-| [[restapi/EntitySuspend][api/entities/suspend/:entity-type/:entity-name]]           | Suspend the entity                 | Owner/Group     |
-| [[restapi/EntityResume][api/entities/resume/:entity-type/:entity-name]]             | Resume the entity                  | Owner/Group     |
-| [[restapi/EntityDelete][api/entities/delete/:entity-type/:entity-name]]             | Delete the entity                  | Owner/Group     |
-| [[restapi/EntityStatus][api/entities/status/:entity-type/:entity-name]]             | Get the status of the entity       | Owner/Group     |
-| [[restapi/EntityDefinition][api/entities/definition/:entity-type/:entity-name]]     | Get the definition of the entity   | Owner/Group     |
-| [[restapi/EntityList][api/entities/list/:entity-type?fields=:fields]]               | Get the list of entities           | Owner/Group     |
-| [[restapi/EntityDependencies][api/entities/dependencies/:entity-type/:entity-name]] | Get the dependencies of the entity | Owner/Group     |
-
-*REST Call on Feed and Process Instances*
-
-| *Resource*                                                                  | *Description*                | *Authorization* |
-| [[restapi/InstanceRunning][api/instance/running/:entity-type/:entity-name]] | List of running instances.   | Owner/Group     |
-| [[restapi/InstanceStatus][api/instance/status/:entity-type/:entity-name]]   | Status of a given instance   | Owner/Group     |
-| [[restapi/InstanceKill][api/instance/kill/:entity-type/:entity-name]]       | Kill a given instance        | Owner/Group     |
-| [[restapi/InstanceSuspend][api/instance/suspend/:entity-type/:entity-name]] | Suspend a running instance   | Owner/Group     |
-| [[restapi/InstanceResume][api/instance/resume/:entity-type/:entity-name]]   | Resume a given instance      | Owner/Group     |
-| [[restapi/InstanceRerun][api/instance/rerun/:entity-type/:entity-name]]     | Rerun a given instance       | Owner/Group     |
-| [[InstanceLogs][api/instance/logs/:entity-type/:entity-name]]               | Get logs of a given instance | Owner/Group     |
-
----++++ Admin Resources Policy
-
-Only users belonging to admin users or groups have access to this resource. Admin membership is
-determined by a static configuration parameter.
-
-| *Resource*                                             | *Description*                               | *Authorization*  |
-| [[restapi/AdminVersion][api/admin/version]]            | Get version of the server                   | No restriction   |
-| [[restapi/AdminStack][api/admin/stack]]                | Get stack of the server                     | Admin User/Group |
-| [[restapi/AdminConfig][api/admin/config/:config-type]] | Get configuration information of the server | Admin User/Group |
-
-
----++++ Lineage Resource Policy
-
-Lineage is read-only and hence all users can look at lineage for their respective entities.
-*Note:* This gap will be fixed in a later release.
-
-
----++ Authentication Configuration
-
-Following is the Server Side Configuration Setup for Authentication.
-
----+++ Common Configuration Parameters
-
-<verbatim>
-# Authentication type must be specified: simple|kerberos
-*.falcon.authentication.type=kerberos
-</verbatim>
-
----+++ Kerberos Configuration
-
-<verbatim>
-##### Service Configuration
-
-# Indicates the Kerberos principal to be used in Falcon Service.
-*.falcon.service.authentication.kerberos.principal=falcon/_HOST@EXAMPLE.COM
-
-# Location of the keytab file with the credentials for the Service principal.
-*.falcon.service.authentication.kerberos.keytab=/etc/security/keytabs/falcon.service.keytab
-
-# name node principal to talk to config store
-*.dfs.namenode.kerberos.principal=nn/_HOST@EXAMPLE.COM
-
-# Indicates how long (in seconds) falcon authentication token is valid before it has to be renewed.
-*.falcon.service.authentication.token.validity=86400
-
-##### SPNEGO Configuration
-
-# Authentication type must be specified: simple|kerberos|<class>
-# org.apache.falcon.security.RemoteUserInHeaderBasedAuthenticationHandler can be used for backwards compatibility
-*.falcon.http.authentication.type=kerberos
-
-# Indicates how long (in seconds) an authentication token is valid before it has to be renewed.
-*.falcon.http.authentication.token.validity=36000
-
-# The signature secret for signing the authentication tokens.
-*.falcon.http.authentication.signature.secret=falcon
-
-# The domain to use for the HTTP cookie that stores the authentication token.
-*.falcon.http.authentication.cookie.domain=
-
-# Indicates if anonymous requests are allowed when using 'simple' authentication.
-*.falcon.http.authentication.simple.anonymous.allowed=true
-
-# Indicates the Kerberos principal to be used for HTTP endpoint.
-# The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
-*.falcon.http.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
-
-# Location of the keytab file with the credentials for the HTTP principal.
-*.falcon.http.authentication.kerberos.keytab=/etc/security/keytabs/spnego.service.keytab
-
-# The kerberos name rules are used to resolve kerberos principal names; refer to Hadoop's KerberosName for more details.
-*.falcon.http.authentication.kerberos.name.rules=DEFAULT
-
-# Comma separated list of black listed users
-*.falcon.http.authentication.blacklisted.users=
-
-# Increase Jetty request buffer size to accommodate the generated Kerberos token
-*.falcon.jetty.request.buffer.size=16192
-</verbatim>
-
----+++ Pseudo/Simple Configuration
-
-<verbatim>
-##### SPNEGO Configuration
-
-# Authentication type must be specified: simple|kerberos|<class>
-# org.apache.falcon.security.RemoteUserInHeaderBasedAuthenticationHandler can be used for backwards compatibility
-*.falcon.http.authentication.type=simple
-
-# Indicates how long (in seconds) an authentication token is valid before it has to be renewed.
-*.falcon.http.authentication.token.validity=36000
-
-# The signature secret for signing the authentication tokens.
-*.falcon.http.authentication.signature.secret=falcon
-
-# The domain to use for the HTTP cookie that stores the authentication token.
-*.falcon.http.authentication.cookie.domain=
-
-# Indicates if anonymous requests are allowed when using 'simple' authentication.
-*.falcon.http.authentication.simple.anonymous.allowed=true
-
-# Comma separated list of black listed users
-*.falcon.http.authentication.blacklisted.users=
-</verbatim>
-
----++ Authorization Configuration
-
----+++ Enabling Authorization
-By default, support for authorization is disabled and specifying ACLs in entities are optional.
-To enable support for authorization, set falcon.security.authorization.enabled to true in the
-startup configuration.
-
-<verbatim>
-# Authorization Enabled flag: false|true
-*.falcon.security.authorization.enabled=true
-</verbatim>
-
----+++ Authorization Provider
-
-Falcon ships with a basic implementation for Authorization, org.apache.falcon.security.DefaultAuthorizationProvider.
-This can be overridden by custom implementations in the startup configuration.
-
-<verbatim>
-# Authorization Provider Fully Qualified Class Name
-*.falcon.security.authorization.provider=org.apache.falcon.security.DefaultAuthorizationProvider
-</verbatim>
-
----+++ Super User Group
-
-Super user group is determined by the configuration:
-
-<verbatim>
-# The name of the group of super-users
-*.falcon.security.authorization.superusergroup=falcon
-</verbatim>
-
----+++ Admin Membership
-
-Administrative users are determined by the configuration:
-
-<verbatim>
-# Admin Users, comma separated users
-*.falcon.security.authorization.admin.users=falcon,ambari-qa,seetharam
-</verbatim>
-
-Administrative groups are determined by the configuration:
-
-<verbatim>
-# Admin Group Membership, comma separated groups
-*.falcon.security.authorization.admin.groups=falcon,testgroup,staff
-</verbatim>
-
-
----++ SSL
-
-Falcon provides transport level security ensuring data confidentiality and integrity. This is
-enabled by default for communicating over HTTP between the client and the server.
-
----+++ SSL Configuration
-
-<verbatim>
-*.falcon.enableTLS=true
-*.keystore.file=/path/to/keystore/file
-*.keystore.password=password
-</verbatim>
-
----+++ Distributed Falcon Setup
-
-Falcon should be configured to communicate with Prism over TLS in secure mode. It is not enabled by default.
-
-
----++ Changes to ownership and permissions of directories managed by Falcon
-
-| *Directory*              | *Location*                                                        | *Owner* | *Permissions* |
-| Configuration Store      | ${config.store.uri}                                               | falcon  | 700           |
-| Cluster Staging Location | ${cluster.staging-location}                                       | falcon  | 777           |
-| Cluster Working Location | ${cluster.working-location}                                       | falcon  | 755           |
-| Shared libs              | {cluster.working}/{lib,libext}                                    | falcon  | 755           |
-| Oozie coord/bundle XMLs  | ${cluster.staging-location}/workflows/{entity}/{entity-name}      | $user   | cluster umask |
-| App logs                 | ${cluster.staging-location}/workflows/{entity}/{entity-name}/logs | $user   | cluster umask |
-
-*Note:* Please note that the cluster staging and working locations MUST be created prior to
-submitting a cluster entity to Falcon. Also, note that the parent dirs must have execute
-permissions.
-
-
----++ Backwards compatibility
-
----+++ Scheduled Entities
-
-Entities already scheduled with an earlier version of Falcon are not compatible with this version
-
----+++ Falcon Clients
-
-Older Falcon clients are backwards compatible with respect to Authentication; user information sent as part of the HTTP
-header, Remote-User, is still honoured when the authentication type is configured as below:
-
-<verbatim>
-*.falcon.http.authentication.type=org.apache.falcon.security.RemoteUserInHeaderBasedAuthenticationHandler
-</verbatim>
-
----+++ Blacklisted super users for authentication
-
-The blacklist of users used to contain the following super users: hdfs, mapreduce, oozie, and falcon.
-The list has been externalized from code into the startup.properties file, is empty now, and needs to be
-configured specifically in that file.
-
-
----+++ Falcon Dashboard
-
-To initialize the current user for dashboard, user should append query param "user.name=<username>" to the REST api call.
-
-If the dashboard user wishes to change the current user, they should do the following:
-   * delete the hadoop.auth cookie from browser cache.
-   * append query param "user.name=<new_user>" to the next REST API call.
-
-When using the Kerberos method, the browser must support HTTP Kerberos SPNEGO.
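-
-For example, a REST call made as a (hypothetical) dashboard user "alice" would look like:
-
-<verbatim>
-$ curl http://localhost:15000/api/admin/version?user.name=alice
-</verbatim>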
-
-
----++ Known Limitations
-
-   * ActiveMQ topics are not secure but will be in the near future
-   * Entities already scheduled with an earlier version of Falcon are not compatible with this version, as new
-   workflow parameters, such as the user, are now passed back into Falcon and are required
-   * Use of hftp as the scheme for the read only interface in the cluster entity [[https://issues.apache.org/jira/browse/HADOOP-10215][will not work in Oozie]].
-   The alternative is to use the webhdfs scheme instead; it has been tested with DistCp (see the example interface below).
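-
-For illustration, a cluster entity readonly interface using the webhdfs scheme might look like the following sketch (host, port and version are placeholders):
-
-<verbatim>
-<interface type="readonly" endpoint="webhdfs://namenode-host:50070" version="2.2.0"/>
-</verbatim>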
-
-
----++ Examples
-
----+++ Accessing the server using Falcon CLI (Java client)
-
-There is no change in the way the CLI is used. The CLI has been changed to work with the configured authentication
-method.
-
----+++ Accessing the server using curl
-
-Try accessing the protected resources using curl. For example:
-
-<verbatim>
-$ kinit
-Please enter the password for venkatesh@LOCALHOST:
-
-$ curl http://localhost:15000/api/admin/version
-
-$ curl http://localhost:15000/api/admin/version?user.name=venkatesh
-
-$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:15000/api/admin/version
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/CommonCLI.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/CommonCLI.twiki b/docs/src/site/twiki/falconcli/CommonCLI.twiki
deleted file mode 100644
index fab2ed1..0000000
--- a/docs/src/site/twiki/falconcli/CommonCLI.twiki
+++ /dev/null
@@ -1,21 +0,0 @@
----++ Common CLI Options
-
----+++Falcon URL
-
-The optional -url option, indicating the URL of the Falcon system to run the command against, can be provided. If it is not
-provided, the URL is picked from the system environment variable FALCON_URL. If FALCON_URL is not set, it is picked from the
-client.properties file. If the option is not provided and is also not set in client.properties, the Falcon CLI will fail.
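-
-For example (the endpoint below is a placeholder for your Falcon server):
-$FALCON_HOME/bin/falcon admin -version -url http://falcon-host:15000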
-
----+++Proxy user support
-
-The -doAs option allows the current user to impersonate other users when interacting with the Falcon system. The current user
-must be configured as a proxyuser in the Falcon system. The proxyuser configuration may restrict the hosts from which a user
-may impersonate others, as well as the groups whose users can be impersonated.
-
-<a href="../FalconDocumentation.html#Proxyuser_support">Proxyuser support described here.</a>
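-
-For example, checking the status of a (hypothetical) process entity on behalf of user "joe":
-$FALCON_HOME/bin/falcon entity -type process -name sample-process -status -doAs joe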
-
----+++Debug Mode
-
-If you export FALCON_DEBUG=true, the Falcon CLI will output the Web Services API details used by any commands you execute.
-This is useful for debugging or to see how the Falcon CLI works with the WS API.
-Alternatively, you can specify '-debug' through the CLI arguments to get the debug statements.
-
-Example:
-$FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml -debug
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ContinueInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ContinueInstance.twiki b/docs/src/site/twiki/falconcli/ContinueInstance.twiki
deleted file mode 100644
index 304e281..0000000
--- a/docs/src/site/twiki/falconcli/ContinueInstance.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Continue
-
-[[CommonCLI][Common CLI Options]]
-
-The Continue option is used to continue a failed workflow instance. This option is valid only for process instances in a terminal state, i.e. KILLED or FAILED.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -continue -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/Definition.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/Definition.twiki b/docs/src/site/twiki/falconcli/Definition.twiki
deleted file mode 100644
index 08d46c7..0000000
--- a/docs/src/site/twiki/falconcli/Definition.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Definition
-
-[[CommonCLI][Common CLI Options]]
-
-The Definition option returns the entity definition submitted earlier during the submit step.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [cluster|datasource|feed|process] -name <<name>> -definition

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/DeleteEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/DeleteEntity.twiki b/docs/src/site/twiki/falconcli/DeleteEntity.twiki
deleted file mode 100644
index f2b3080..0000000
--- a/docs/src/site/twiki/falconcli/DeleteEntity.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Delete
-
-[[CommonCLI][Common CLI Options]]
-
-Delete removes the submitted entity definition for the specified entity and puts it into the archive.
-
-Usage:
-$FALCON_HOME/bin/falcon entity  -type [cluster|datasource|feed|process] -name <<name>> -delete

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/DependencyEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/DependencyEntity.twiki b/docs/src/site/twiki/falconcli/DependencyEntity.twiki
deleted file mode 100644
index bdef1d7..0000000
--- a/docs/src/site/twiki/falconcli/DependencyEntity.twiki
+++ /dev/null
@@ -1,10 +0,0 @@
----+++Dependency
-
-[[CommonCLI][Common CLI Options]]
-
-The dependency option lists all the entities on which the specified entity is dependent.
-For example, for a feed, dependency returns the cluster name, and for a process it returns all the input feeds,
-output feeds and cluster names.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type [cluster|datasource|feed|process] -name <<name>> -dependency
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/DependencyInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/DependencyInstance.twiki b/docs/src/site/twiki/falconcli/DependencyInstance.twiki
deleted file mode 100644
index 51508cc..0000000
--- a/docs/src/site/twiki/falconcli/DependencyInstance.twiki
+++ /dev/null
@@ -1,33 +0,0 @@
----+++Dependency
-Displays the instances which are dependent on the given instance. For example, for a given process instance it will
-list all the input feed instances (if any) and the output feed instances (if any).
-
-An example use case of this command is as follows:
-Suppose you find out that the data in a feed instance was incorrect and you need to figure out which process instances
-consumed this feed instance so that you can reprocess them after correcting it. You can give the feed instance
-and it will tell you which process instance produced this feed and which process instances consumed it.
-
-NOTE:
-1. instanceTime must be a valid instanceTime, e.g. the instanceTime of a feed should be in its validity range on the applicable clusters,
- and it should be in the range of instances produced by the producer process (if any).
-
-2. For processes with inputs like latest() which vary with time, the results are not guaranteed to be correct.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -dependency -instanceTime "yyyy-MM-dd'T'HH:mm'Z'"
-
-For example:
-$FALCON_HOME/bin/falcon instance -dependency -type feed -name out -instanceTime 2014-12-15T00:00Z
-name: producer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:00Z, tags: Output
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:03Z, tags: Input
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:04Z, tags: Input
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:02Z, tags: Input
-name: consumer, type: PROCESS, cluster: local, instanceTime: 2014-12-15T00:05Z, tags: Input
-
-
-Response: default/Success!
-
-Request Id: default/1125035965@qtp-503156953-7 - 447be0ad-1d38-4dce-b438-20f3de69b172
-
-
-<a href="../Restapi/InstanceDependencies.html">Optional params described here.</a>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/EdgeMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/EdgeMetadata.twiki b/docs/src/site/twiki/falconcli/EdgeMetadata.twiki
deleted file mode 100644
index 477996e..0000000
--- a/docs/src/site/twiki/falconcli/EdgeMetadata.twiki
+++ /dev/null
@@ -1,11 +0,0 @@
----+++ Edge
-
-[[CommonCLI][Common CLI Options]]
-
-Get the edge with the specified id.
-
-Usage:
-$FALCON_HOME/bin/falcon metadata -edge -id <<id>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -edge -id Q9n-Q-5g
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/FalconCLI.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/FalconCLI.twiki b/docs/src/site/twiki/falconcli/FalconCLI.twiki
deleted file mode 100644
index 0c0082f..0000000
--- a/docs/src/site/twiki/falconcli/FalconCLI.twiki
+++ /dev/null
@@ -1,112 +0,0 @@
----+FalconCLI
-
-FalconCLI is an interface between the user and Falcon. It is a command line utility provided by Falcon. FalconCLI supports Entity Management, Instance Management and Admin operations. There is a set of web services that are used by FalconCLI to interact with Falcon.
-
----+++Types of CLI Options
-
-CLI options are classified into :
-
-   * <a href="#Common_CLI_Options">Common CLI Options</a>
-   * <a href="#Entity_Management_Commands">Entity Management Commands</a>
-   * <a href="#Instance_Management_Commands">Instance Management Commands</a>
-   * <a href="#Metadata_Commands">Metadata Commands</a>
-   * <a href="#Admin_Commands">Admin commands</a>
-   * <a href="#Recipe_Commands">Recipe commands</a>
-
-
-
------------
-
----++Common CLI Options
-
----+++Falcon URL
-
-The optional -url option, indicating the URL of the Falcon system to run the command against, can be provided. If it is not
-provided, the URL is picked from the system environment variable FALCON_URL. If FALCON_URL is not set, it is picked from the
-client.properties file. If the option is not provided and is also not set in client.properties, the Falcon CLI will fail.
-
----+++Proxy user support
-
-The -doAs option allows the current user to impersonate other users when interacting with the Falcon system. The current user
-must be configured as a proxyuser in the Falcon system. The proxyuser configuration may restrict the hosts from which a user
-may impersonate others, as well as the groups whose users can be impersonated.
-
-<a href="../FalconDocumentation.html#Proxyuser_support">Proxyuser support described here.</a>
-
----+++Debug Mode
-
-If you export FALCON_DEBUG=true, the Falcon CLI will output the Web Services API details used by any commands you execute.
-This is useful for debugging or to see how the Falcon CLI works with the WS API.
-Alternatively, you can specify '-debug' through the CLI arguments to get the debug statements.
-Example:
-$FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml -debug
-
------------
-
----++Entity Management Commands
-
-| *Command*                                      | *Description*                                   |
-| [[Submit]]                                     | Submit the entity definition.                   |
-| [[Schedule]]                                   | Schedules the entity                            |
-| [[SuspendEntity][Suspend]]                     | Suspends the scheduled entity                   |
-| [[ResumeEntity][Resume]]                       | Puts a suspended entity back in action          |
-| [[DeleteEntity][Delete]]                       | Remove the submitted entity                     |
-| [[ListEntity][List]]                           | Lists the particular type of entity             |
-| [[SummaryEntity][Summary]]                     | Shows summary of the type of entity             |
-| [[UpdateEntity][Update]]                       | Update already submitted entity                 |
-| [[Touch]]                                      | Force update already submitted entity           |
-| [[StatusEntity][Status]]                       | Returns the status of the entity                |
-| [[DependencyEntity][Dependency]]               | Lists all the entities on which the specified entity is dependent|
-| [[Definition]]                                 | Returns the definition of the entity            |
-| [[Lookup]]                                     | Returns the feed name for a path                |
-| [[SLAAlert]]                                   | Returns the feed instances which have missed their SLA|
-
-
------------
----++Instance Management Commands
-
-| *Command*                                      | *Description*                                   |
-| [[KillInstance][Kill]]                         | Kills all the instances of the specified process |
-| [[SuspendInstance][Suspend]]                   | Suspends instances of a specified process       |
-| [[ContinueInstance][Continue]]                 | Continues the failed workflow instances         |
-| [[RerunInstance][Rerun]]                       | Reruns instances of the specified process       |
-| [[ResumeInstance][Resume]]                     | Resumes instances of the specified process from the suspended state |
-| [[StatusInstance][Status]]                     | Gets the status of the instances                |
-| [[ListInstance][List]]                         | Gets single or multiple instances               |
-| [[SummaryInstance][Summary]]                   | Gets consolidated status of the instances between the specified time period    |
-| [[RunningInstance][Running]]                   | Gets running instances of the mentioned process |
-| [[FeedInstanceListing]]                        | Gets falcon feed instance availability          |
-| [[LogsInstance][Logs]]                         | Gets logs for instance                          |
-| [[LifeCycleInstance][LifeCycle]]               | Describes the list of life cycles of an entity  |
-| [[TriageInstance][Triage]]                     | Traces an entity's ancestors for failure        |
-| [[ParamsInstance][Params]]                     | Displays workflow params                        |
-| [[DependencyInstance][Dependency]]             | Displays the dependent instances    |
-
------------
-
----++Metadata Commands
-
-| *Command*                                      | *Description*                                    |
-|[[LineageMetadata][Lineage]]                    | Returns the relationship between processes and feeds |
-|[[VertexMetadata][Vertex]]                      | Gets the vertex with the specified id            |
-|[[VerticesMetadata][Vertices]]                  | Gets all vertices for a key                      |
-|[[VertexEdgesMetadata][Vertex Edges]]           | Gets the adjacent vertices or edges of the vertex|
-|[[EdgeMetadata][Edge]]                          | Gets the edge with the specified id              |
-|[[ListMetadata][List]]                          | Returns a list of all dimensions of a given type |
-|[[RelationMetadata][Relations]]                 | Returns all dimensions related to the specified dimension |
-
------------
-
----++Admin Commands
-
-| *Command*                                      | *Description*                                   |
-|[[HelpAdmin][Help]]                             | Returns help options                            |
-|[[VersionAdmin][Version]]                       | Returns the current Falcon version              |
-|[[StatusAdmin][Status]]                         | Returns the status of Falcon                    |
-
------------
-
----++Recipe Commands
-
-| *Command*                                      | *Description*                                   |
-|[[SubmitRecipe][Submit]]                        | Submit the specified Recipe                     |
-
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/FeedInstanceListing.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/FeedInstanceListing.twiki b/docs/src/site/twiki/falconcli/FeedInstanceListing.twiki
deleted file mode 100644
index aa60d49..0000000
--- a/docs/src/site/twiki/falconcli/FeedInstanceListing.twiki
+++ /dev/null
@@ -1,11 +0,0 @@
----+++FeedInstanceListing
-
-Get falcon feed instance availability.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type feed -name <<name>> -listing
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
--colo <<colo>>
-
-<a href="../Restapi/FeedInstanceListing.html">Optional params described here.</a>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/HelpAdmin.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/HelpAdmin.twiki b/docs/src/site/twiki/falconcli/HelpAdmin.twiki
deleted file mode 100644
index 69b1378..0000000
--- a/docs/src/site/twiki/falconcli/HelpAdmin.twiki
+++ /dev/null
@@ -1,6 +0,0 @@
----+++Help
-
-[[CommonCLI][Common CLI Options]]
-
-Usage:
-$FALCON_HOME/bin/falcon admin -help

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/KillInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/KillInstance.twiki b/docs/src/site/twiki/falconcli/KillInstance.twiki
deleted file mode 100644
index 623921f..0000000
--- a/docs/src/site/twiki/falconcli/KillInstance.twiki
+++ /dev/null
@@ -1,14 +0,0 @@
----+++Kill
-
-[[CommonCLI][Common CLI Options]]
-
-Kill sub-command is used to kill all the instances of the specified process whose nominal time is between the given start time and end time.
-
-Note:
-1. The start time and end time need to be specified in TZ format.
-Example:   01 Jan 2012 01:00  => 2012-01-01T01:00Z
-
-2. The process name is a compulsory parameter for each instance management command.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -kill -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/LifeCycleInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/LifeCycleInstance.twiki b/docs/src/site/twiki/falconcli/LifeCycleInstance.twiki
deleted file mode 100644
index bbcda55..0000000
--- a/docs/src/site/twiki/falconcli/LifeCycleInstance.twiki
+++ /dev/null
@@ -1,9 +0,0 @@
----+++LifeCycle
-
-[[CommonCLI][Common CLI Options]]
-
-Describes the list of life cycles of an entity; for a feed it can be replication/retention and for a process it can be execution.
-This can be used with instance management options. The default values are replication for a feed and execution for a process.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -status -lifecycle <<lifecycletype>> -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/LineageMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/LineageMetadata.twiki b/docs/src/site/twiki/falconcli/LineageMetadata.twiki
deleted file mode 100644
index e668e03..0000000
--- a/docs/src/site/twiki/falconcli/LineageMetadata.twiki
+++ /dev/null
@@ -1,12 +0,0 @@
----+++Lineage
-
-
-Returns the relationship between processes and feeds in a given pipeline in [[http://www.graphviz.org/content/dot-language/][dot]] format.
-You can use the output to view a graphical representation of the DAG using an online graphviz viewer like [[http://www.webgraphviz.com/][this one]].
-
-Usage:
-
-$FALCON_HOME/bin/falcon metadata -lineage -pipeline my-pipeline
-
-pipeline is a mandatory option.
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ListEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ListEntity.twiki b/docs/src/site/twiki/falconcli/ListEntity.twiki
deleted file mode 100644
index 0047c1b..0000000
--- a/docs/src/site/twiki/falconcli/ListEntity.twiki
+++ /dev/null
@@ -1,17 +0,0 @@
----+++List
-
-[[CommonCLI][Common CLI Options]]
-
-Entities of a particular type can be listed with the list sub-command.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -list
-
-Optional Args : -fields <<field1,field2>>
--type <<[cluster|datasource|feed|process],[cluster|datasource|feed|process]>>
--nameseq <<namesubsequence>> -tagkeys <<tagkeyword1,tagkeyword2>>
--filterBy <<field1:value1,field2:value2>> -tags <<tagkey=tagvalue,tagkey=tagvalue>>
--orderBy <<field>> -sortOrder <<sortOrder>> -offset 0 -numResults 10
-
-<a href="../Restapi/EntityList.html">Optional params described here.</a>
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ListInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ListInstance.twiki b/docs/src/site/twiki/falconcli/ListInstance.twiki
deleted file mode 100644
index 1203629..0000000
--- a/docs/src/site/twiki/falconcli/ListInstance.twiki
+++ /dev/null
@@ -1,20 +0,0 @@
----+++List
-
-[[CommonCLI][Common CLI Options]]
-
-The List option via the CLI can be used to get single or multiple instances. If the instance is not yet materialized but is within the process validity range, WAITING is returned as the state. The instance time is also returned. The log location gives the Oozie workflow URL.
-If the instance is in the WAITING state, missing dependencies are listed.
-
-Example: Suppose a process has 3 instances: one has succeeded, one is in the running state and the other one is waiting; the expected output is:
-
-{"status":"SUCCEEDED","message":"getStatus is successful","instances":[{"instance":"2012-05-07T05:02Z","status":"SUCCEEDED","logFile":"http://oozie-dashboard-url"},{"instance":"2012-05-07T05:07Z","status":"RUNNING","logFile":"http://oozie-dashboard-url"}, {"instance":"2010-01-02T11:05Z","status":"WAITING"}]}
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -list
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
--colo <<colo>> -lifecycle <<lifecycles>>
--filterBy <<field1:value1,field2:value2>> -orderBy field -sortOrder <<sortOrder>> -offset 0 -numResults 10
--allAttempts To get all the attempts for corresponding instances
-
-<a href="../Restapi/InstanceList.html">Optional params described here.</a>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ListMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ListMetadata.twiki b/docs/src/site/twiki/falconcli/ListMetadata.twiki
deleted file mode 100644
index 8adea21..0000000
--- a/docs/src/site/twiki/falconcli/ListMetadata.twiki
+++ /dev/null
@@ -1,13 +0,0 @@
----+++ List
-
-[[CommonCLI][Common CLI Options]]
-
-Lists all dimensions of a given type. If the user provides the optional param cluster, only the dimensions related to that cluster are listed.
-Usage:
-$FALCON_HOME/bin/falcon metadata -list -type [cluster_entity|datasource_entity|feed_entity|process_entity|user|colo|tags|groups|pipelines]
-
-Optional Args : -cluster <<cluster name>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -list -type process_entity -cluster primary-cluster
-$FALCON_HOME/bin/falcon metadata -list -type tags

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/LogsInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/LogsInstance.twiki b/docs/src/site/twiki/falconcli/LogsInstance.twiki
deleted file mode 100644
index ac40ec0..0000000
--- a/docs/src/site/twiki/falconcli/LogsInstance.twiki
+++ /dev/null
@@ -1,14 +0,0 @@
----+++Logs
-
-[[CommonCLI][Common CLI Options]]
-
-Get logs for instance actions
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -logs
-
-Optional Args : -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" -runid <<runid>>
--colo <<colo>> -lifecycle <<lifecycles>>
--filterBy <<field1:value1,field2:value2>> -orderBy field -sortOrder <<sortOrder>> -offset 0 -numResults 10
-
-<a href="../Restapi/InstanceLogs.html">Optional params described here.</a>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/Lookup.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/Lookup.twiki b/docs/src/site/twiki/falconcli/Lookup.twiki
deleted file mode 100644
index a9d9c4e..0000000
--- a/docs/src/site/twiki/falconcli/Lookup.twiki
+++ /dev/null
@@ -1,12 +0,0 @@
----+++Lookup
-
-[[CommonCLI][Common CLI Options]]
-
-The Lookup option tells you which feed a given path belongs to. This can be useful in several scenarios; e.g. generally you would want to have a single definition for common feeds like metadata with the same location,
-otherwise it can result in problems (different retention durations can result in surprises for one team). If you want to check whether there are multiple definitions of the same metadata, you can pick
-an instance of it and run it through the lookup command as shown below.
-
-Usage:
-$FALCON_HOME/bin/falcon entity -type feed -lookup -path /data/projects/my-hourly/2014/10/10/23/
-
-If you have multiple feeds with location as /data/projects/my-hourly/${YEAR}/${MONTH}/${DAY}/${HOUR} then this command will return all of them.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ParamsInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ParamsInstance.twiki b/docs/src/site/twiki/falconcli/ParamsInstance.twiki
deleted file mode 100644
index 9f217ba..0000000
--- a/docs/src/site/twiki/falconcli/ParamsInstance.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Params
-
-[[CommonCLI][Common CLI Options]]
-
-Displays the workflow params of a given instance. The start time is considered as the nominal time of that instance and the end time is not considered.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -params -start "yyyy-MM-dd'T'HH:mm'Z'"

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/RelationMetadata.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/RelationMetadata.twiki b/docs/src/site/twiki/falconcli/RelationMetadata.twiki
deleted file mode 100644
index e9bc970..0000000
--- a/docs/src/site/twiki/falconcli/RelationMetadata.twiki
+++ /dev/null
@@ -1,10 +0,0 @@
----+++ Relations
-
-[[CommonCLI][Common CLI Options]]
-
-Lists all dimensions related to the specified dimension, identified by dimension-type and dimension-name.
-Usage:
-$FALCON_HOME/bin/falcon metadata -relations -type [cluster_entity|feed_entity|process_entity|user|colo|tags|groups|pipelines] -name <<Dimension Name>>
-
-Example:
-$FALCON_HOME/bin/falcon metadata -relations -type process_entity -name sample-process
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/RerunInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/RerunInstance.twiki b/docs/src/site/twiki/falconcli/RerunInstance.twiki
deleted file mode 100644
index aac844c..0000000
--- a/docs/src/site/twiki/falconcli/RerunInstance.twiki
+++ /dev/null
@@ -1,10 +0,0 @@
----+++Rerun
-
-[[CommonCLI][Common CLI Options]]
-
-The Rerun option is used to rerun instances of a given process. On issuing a rerun, by default the execution resumes from the last failed node in the workflow. This option is valid only for process instances in a terminal state, i.e. SUCCEEDED, KILLED or FAILED.
-If one wants to forcefully rerun the entire workflow, -force should be passed along with -rerun.
-Additionally, you can also specify properties to override via a properties file.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -rerun -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" [-force] [-file <<properties file>>]

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ResumeEntity.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ResumeEntity.twiki b/docs/src/site/twiki/falconcli/ResumeEntity.twiki
deleted file mode 100644
index 39be411..0000000
--- a/docs/src/site/twiki/falconcli/ResumeEntity.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Resume
-
-[[CommonCLI][Common CLI Options]]
-
-Puts a suspended process/feed back to active, which in turn resumes applicable oozie bundle.
-
-Usage:
- $FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -resume

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/ResumeInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/ResumeInstance.twiki b/docs/src/site/twiki/falconcli/ResumeInstance.twiki
deleted file mode 100644
index 3790f47..0000000
--- a/docs/src/site/twiki/falconcli/ResumeInstance.twiki
+++ /dev/null
@@ -1,8 +0,0 @@
----+++Resume
-
-[[CommonCLI][Common CLI Options]]
-
-The Resume option is used to resume any instance that is in the suspended state.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -resume -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/falconcli/RunningInstance.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/falconcli/RunningInstance.twiki b/docs/src/site/twiki/falconcli/RunningInstance.twiki
deleted file mode 100644
index f269358..0000000
--- a/docs/src/site/twiki/falconcli/RunningInstance.twiki
+++ /dev/null
@@ -1,13 +0,0 @@
----+++Running
-
-[[CommonCLI][Common CLI Options]]
-
-Running option provides all the running instances of the mentioned process.
-
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -running
-
-Optional Args : -colo <<colo>> -lifecycle <<lifecycles>>
--filterBy <<field1:value1,field2:value2>> -orderBy <<field>> -sortOrder <<sortOrder>> -offset 0 -numResults 10
-
-<a href="../Restapi/InstanceRunning.html">Optional params described here.</a>
\ No newline at end of file


[30/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java b/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
deleted file mode 100644
index 382a8c4..0000000
--- a/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
+++ /dev/null
@@ -1,1238 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.ClusterHelper;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.FeedHelper;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.Argument;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.ExtractMethod;
-import org.apache.falcon.entity.v0.feed.Location;
-import org.apache.falcon.entity.v0.feed.Locations;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.entity.v0.feed.MergeType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.feed.Partition;
-import org.apache.falcon.entity.v0.feed.Partitions;
-import org.apache.falcon.entity.v0.feed.Property;
-import org.apache.falcon.entity.v0.feed.Validity;
-import org.apache.falcon.group.FeedGroupMapTest;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.service.LifecyclePolicyMap;
-import org.apache.falcon.util.FalconTestUtil;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.hadoop.fs.Path;
-import org.mockito.Mockito;
-import org.testng.Assert;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Marshaller;
-import javax.xml.bind.Unmarshaller;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringWriter;
-import java.util.Map;
-
-import static org.testng.AssertJUnit.assertEquals;
-
-/**
- * Test Cases for Feed entity parser.
- */
-public class FeedEntityParserTest extends AbstractTestBase {
-
-    private final FeedEntityParser parser = (FeedEntityParser) EntityParserFactory
-            .getParser(EntityType.FEED);
-
-    private Feed modifiableFeed;
-
-    @BeforeMethod
-    public void setUp() throws Exception {
-        cleanupStore();
-        ConfigurationStore store = ConfigurationStore.get();
-
-        this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
-        this.conf = dfsCluster.getConf();
-
-        Unmarshaller unmarshaller = EntityType.CLUSTER.getUnmarshaller();
-        Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass()
-                .getResourceAsStream(CLUSTER_XML));
-        cluster.setName("testCluster");
-        store.publish(EntityType.CLUSTER, cluster);
-
-        cluster = (Cluster) unmarshaller.unmarshal(this.getClass()
-                .getResourceAsStream(CLUSTER_XML));
-        cluster.setName("backupCluster");
-        store.publish(EntityType.CLUSTER, cluster);
-
-        LifecyclePolicyMap.get().init();
-        CurrentUser.authenticate(FalconTestUtil.TEST_USER_2);
-        modifiableFeed = parser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-        Unmarshaller dsUnmarshaller = EntityType.DATASOURCE.getUnmarshaller();
-        Datasource ds = (Datasource) dsUnmarshaller.unmarshal(this.getClass()
-                .getResourceAsStream(DATASOURCE_XML));
-        ds.setName("test-hsql-db");
-        store.publish(EntityType.DATASOURCE, ds);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testValidations() throws Exception {
-        ConfigurationStore.get().remove(EntityType.CLUSTER, "backupCluster");
-        parser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-    }
-
-    @Test
-    public void testParse() throws IOException, FalconException, JAXBException {
-
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED_XML));
-
-        Assert.assertNotNull(feed);
-        assertEquals(feed.getName(), "clicks");
-        assertEquals(feed.getDescription(), "clicks log");
-        assertEquals(feed.getFrequency().toString(), "hours(1)");
-        assertEquals(feed.getSla().getSlaHigh().toString(), "hours(3)");
-        assertEquals(feed.getSla().getSlaLow().toString(), "hours(2)");
-        assertEquals(feed.getGroups(), "online,bi");
-
-        assertEquals(feed.getClusters().getClusters().get(0).getName(),
-                "testCluster");
-        assertEquals(feed.getClusters().getClusters().get(0).getSla().getSlaLow().toString(), "hours(3)");
-        assertEquals(feed.getClusters().getClusters().get(0).getSla().getSlaHigh().toString(), "hours(4)");
-
-        assertEquals(feed.getClusters().getClusters().get(0).getType(),
-                ClusterType.SOURCE);
-        assertEquals(SchemaHelper.formatDateUTC(feed.getClusters().getClusters().get(0).getValidity()
-                .getStart()), "2011-11-01T00:00Z");
-        assertEquals(SchemaHelper.formatDateUTC(feed.getClusters().getClusters().get(0).getValidity()
-                .getEnd()), "2011-12-31T00:00Z");
-        assertEquals(feed.getTimezone().getID(), "UTC");
-        assertEquals(feed.getClusters().getClusters().get(0).getRetention()
-                .getAction(), ActionType.DELETE);
-        assertEquals(feed.getClusters().getClusters().get(0).getRetention()
-                .getLimit().toString(), "hours(48)");
-
-        assertEquals(feed.getClusters().getClusters().get(1).getName(),
-                "backupCluster");
-        assertEquals(feed.getClusters().getClusters().get(1).getType(),
-                ClusterType.TARGET);
-        assertEquals(SchemaHelper.formatDateUTC(feed.getClusters().getClusters().get(1).getValidity()
-                .getStart()), "2011-11-01T00:00Z");
-        assertEquals(SchemaHelper.formatDateUTC(feed.getClusters().getClusters().get(1).getValidity()
-                .getEnd()), "2011-12-31T00:00Z");
-        assertEquals(feed.getClusters().getClusters().get(1).getRetention()
-                .getAction(), ActionType.DELETE);
-        assertEquals(feed.getClusters().getClusters().get(1).getRetention()
-                .getLimit().toString(), "hours(6)");
-
-        assertEquals("${nameNode}/projects/falcon/clicks",
-                FeedHelper.createStorage(feed).getUriTemplate(LocationType.DATA));
-        assertEquals("${nameNode}/projects/falcon/clicksMetaData",
-                FeedHelper.createStorage(feed).getUriTemplate(LocationType.META));
-        assertEquals("${nameNode}/projects/falcon/clicksStats",
-                FeedHelper.createStorage(feed).getUriTemplate(LocationType.STATS));
-
-        assertEquals(feed.getACL().getGroup(), "group");
-        assertEquals(feed.getACL().getOwner(), FalconTestUtil.TEST_USER_2);
-        assertEquals(feed.getACL().getPermission(), "0x755");
-
-        assertEquals(feed.getSchema().getLocation(), "/schema/clicks");
-        assertEquals(feed.getSchema().getProvider(), "protobuf");
-
-        StringWriter stringWriter = new StringWriter();
-        Marshaller marshaller = EntityType.FEED.getMarshaller();
-        marshaller.marshal(feed, stringWriter);
-        System.out.println(stringWriter.toString());
-    }
-
-    @Test
-    public void testLifecycleParse() throws Exception {
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED3_XML));
-        assertEquals("hours(17)", feed.getLifecycle().getRetentionStage().getFrequency().toString());
-        assertEquals("AgeBasedDelete", FeedHelper.getPolicies(feed, "testCluster").get(0));
-        assertEquals("reports", feed.getLifecycle().getRetentionStage().getQueue());
-        assertEquals("NORMAL", feed.getLifecycle().getRetentionStage().getPriority());
-    }
-
-    @Test(expectedExceptions = ValidationException.class,
-            expectedExceptionsMessageRegExp = ".*Retention is a mandatory stage.*")
-    public void testMandatoryRetention() throws Exception {
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED3_XML));
-        feed.getLifecycle().setRetentionStage(null);
-        parser.validate(feed);
-    }
-
-    @Test
-    public void testValidRetentionFrequency() throws Exception {
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED3_XML));
-
-        feed.setFrequency(Frequency.fromString("minutes(30)"));
-        Frequency frequency = Frequency.fromString("minutes(60)");
-        feed.getLifecycle().getRetentionStage().setFrequency(frequency);
-        parser.validate(feed); // no validation exception should be thrown
-
-        frequency = Frequency.fromString("hours(1)");
-        feed.getLifecycle().getRetentionStage().setFrequency(frequency);
-        parser.validate(feed); // no validation exception should be thrown
-    }
-
-    @Test
-    public void testDefaultRetentionFrequencyConflict() throws Exception {
-        Feed feed = parser.parseAndValidate(this.getClass().getResourceAsStream(FEED3_XML));
-        feed.getLifecycle().getRetentionStage().setFrequency(null);
-        feed.getClusters().getClusters().get(0).getLifecycle().getRetentionStage().setFrequency(null);
-        feed.setFrequency(Frequency.fromString("minutes(10)"));
-        parser.validate(feed); // shouldn't throw a validation exception
-
-
-        feed.setFrequency(Frequency.fromString("hours(7)"));
-        parser.validate(feed); // shouldn't throw a validation exception
-
-        feed.setFrequency(Frequency.fromString("days(2)"));
-        parser.validate(feed); // shouldn't throw a validation exception
-    }
-
-    @Test(expectedExceptions = ValidationException.class,
-        expectedExceptionsMessageRegExp = ".*Retention can not be more frequent than data availability.*")
-    public void testRetentionFrequentThanFeed() throws Exception {
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED3_XML));
-
-        feed.setFrequency(Frequency.fromString("hours(2)"));
-        Frequency frequency = Frequency.fromString("minutes(60)");
-        feed.getLifecycle().getRetentionStage().setFrequency(frequency);
-        parser.validate(feed);
-    }
-
-    @Test(expectedExceptions = ValidationException.class,
-        expectedExceptionsMessageRegExp = ".*Feed Retention can not be more frequent than.*")
-    public void testRetentionFrequency() throws Exception {
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED3_XML));
-
-        feed.setFrequency(Frequency.fromString("minutes(30)"));
-        Frequency frequency = Frequency.fromString("minutes(59)");
-        feed.getLifecycle().getRetentionStage().setFrequency(frequency);
-        parser.validate(feed);
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void applyValidationInvalidFeed() throws Exception {
-        Feed feed = parser.parseAndValidate(ProcessEntityParserTest.class
-                .getResourceAsStream(FEED_XML));
-        feed.getClusters().getClusters().get(0).setName("invalid cluster");
-        parser.validate(feed);
-    }
-
-    @Test
-    public void testPartitionExpression() throws FalconException {
-        Feed feed = parser.parseAndValidate(ProcessEntityParserTest.class
-                .getResourceAsStream(FEED_XML));
-
-        //When there are more than 1 src clusters, there should be partition expression
-        org.apache.falcon.entity.v0.feed.Cluster newCluster = new org.apache.falcon.entity.v0.feed.Cluster();
-        newCluster.setName("newCluster");
-        newCluster.setType(ClusterType.SOURCE);
-        newCluster.setPartition("${cluster.colo}");
-        feed.getClusters().getClusters().add(newCluster);
-        try {
-            parser.validate(feed);
-            Assert.fail("Expected ValidationException");
-        } catch (ValidationException ignore) {
-            //ignore
-        }
-
-        //When there are more than 1 src clusters, the partition expression should contain cluster variable
-        feed.getClusters().getClusters().get(0).setPartition("*");
-        try {
-            parser.validate(feed);
-            Assert.fail("Expected ValidationException");
-        } catch (ValidationException ignore) {
-            //ignore
-        }
-
-        //When there are more than 1 target cluster, there should be partition expre
-        newCluster.setType(ClusterType.TARGET);
-        try {
-            parser.validate(feed);
-            Assert.fail("Expected ValidationException");
-        } catch (ValidationException ignore) {
-            //ignore
-        }
-
-        //When there are more than 1 target clusters, the partition expression should contain cluster variable
-        feed.getClusters().getClusters().get(1).setPartition("*");
-        try {
-            parser.validate(feed);
-            Assert.fail("Expected ValidationException");
-        } catch (ValidationException ignore) {
-            //ignore
-        }
-
-        //Number of parts in partition expression < number of partitions defined for feed
-        feed.getClusters().getClusters().get(1).setPartition("*/*");
-        try {
-            parser.validate(feed);
-            Assert.fail("Expected ValidationException");
-        } catch (ValidationException ignore) {
-            //ignore
-        }
-
-        feed.getClusters().getClusters().get(0).setPartition(null);
-        feed.getClusters().getClusters().get(1).setPartition(null);
-        feed.getClusters().getClusters().remove(2);
-        feed.setPartitions(null);
-        parser.validate(feed);
-    }
-
-    @Test
-    public void testInvalidClusterValidityTime() {
-        Validity validity = modifiableFeed.getClusters().getClusters().get(0)
-                .getValidity();
-        try {
-            validity.setStart(SchemaHelper.parseDateUTC("2007-02-29T00:00Z"));
-            modifiableFeed.getClusters().getClusters().get(0)
-                    .setValidity(validity);
-            parser.parseAndValidate(marshallEntity(modifiableFeed));
-            Assert.fail("Cluster validity failed");
-        } catch (Exception e) {
-            System.out.println(e.getMessage());
-            validity.setStart(SchemaHelper.parseDateUTC("2011-11-01T00:00Z"));
-            modifiableFeed.getClusters().getClusters().get(0)
-                    .setValidity(validity);
-        }
-
-        try {
-            validity.setEnd(SchemaHelper.parseDateUTC("2010-04-31T00:00Z"));
-            modifiableFeed.getClusters().getClusters().get(0)
-                    .setValidity(validity);
-            parser.parseAndValidate(marshallEntity(modifiableFeed));
-            Assert.fail("Cluster validity failed");
-        } catch (Exception e) {
-            System.out.println(e.getMessage());
-            validity.setEnd(SchemaHelper.parseDateUTC("2011-12-31T00:00Z"));
-            modifiableFeed.getClusters().getClusters().get(0)
-                    .setValidity(validity);
-        }
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInvalidProcessValidity() throws Exception {
-        Feed feed = parser.parseAndValidate((FeedEntityParserTest.class
-                .getResourceAsStream(FEED_XML)));
-        feed.getClusters().getClusters().get(0).getValidity()
-                .setStart(SchemaHelper.parseDateUTC("2012-11-01T00:00Z"));
-        parser.validate(feed);
-    }
-
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = "slaLow of Feed:.*")
-    public void testInvalidSlaLow() throws Exception {
-        Feed feed = parser.parseAndValidate((FeedEntityParserTest.class
-                .getResourceAsStream(FEED_XML)));
-        feed.getSla().setSlaLow(new Frequency("hours(4)"));
-        feed.getSla().setSlaHigh(new Frequency("hours(2)"));
-        parser.validate(feed);
-    }
-
-
-    @Test(expectedExceptions = ValidationException.class, expectedExceptionsMessageRegExp = "slaHigh of Feed:.*")
-    public void testInvalidSlaHigh() throws Exception {
-        Feed feed = parser.parseAndValidate((FeedEntityParserTest.class
-                .getResourceAsStream(FEED_XML)));
-        feed.getSla().setSlaLow(new Frequency("hours(2)"));
-        feed.getSla().setSlaHigh(new Frequency("hours(10)"));
-        feed.getClusters().getClusters().get(0).getRetention().setLimit(new Frequency("hours(9)"));
-        parser.validate(feed);
-    }
-
-
-    @Test
-    public void testValidFeedGroup() throws FalconException, JAXBException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                (FeedEntityParserTest.class.getResourceAsStream(FEED_XML)));
-        feed1.setName("f1" + System.currentTimeMillis());
-        feed1.setGroups("group1,group2,group3");
-        feed1.setLocations(new Locations());
-        Location location = new Location();
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location.setType(LocationType.DATA);
-        feed1.getLocations().getLocations().add(location);
-        feed1.getClusters().getClusters().get(0).getLocations().getLocations().set(0, location);
-        parser.parseAndValidate(feed1.toString());
-        ConfigurationStore.get().publish(EntityType.FEED, feed1);
-
-        Feed feed2 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                (FeedEntityParserTest.class.getResourceAsStream(FEED_XML)));
-        feed2.setName("f2" + System.currentTimeMillis());
-        feed2.setGroups("group1,group2,group5");
-        feed2.setLocations(new Locations());
-        Location location2 = new Location();
-        location2
-                .setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location2.setType(LocationType.DATA);
-        feed2.getLocations().getLocations().add(location2);
-        feed2.getClusters().getClusters().get(0).getLocations().getLocations().set(0, location);
-        parser.parseAndValidate(feed2.toString());
-    }
-
-    // TODO Disabled the test since I do not see anything invalid in here.
-    @Test(enabled = false, expectedExceptions = ValidationException.class)
-    public void testInvalidFeedClusterDataLocation() throws JAXBException, FalconException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                (FeedEntityParserTest.class.getResourceAsStream(FEED_XML)));
-        feed1.setName("f1" + System.currentTimeMillis());
-        feed1.setGroups("group1,group2,group3");
-        feed1.setLocations(new Locations());
-        Location location = new Location();
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location.setType(LocationType.DATA);
-        feed1.getLocations().getLocations().add(location);
-        parser.parseAndValidate(feed1.toString());
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testInvalidFeedGroup() throws FalconException, JAXBException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                (FeedEntityParserTest.class.getResourceAsStream(FEED_XML)));
-        feed1.setName("f1" + System.currentTimeMillis());
-        feed1.setGroups("group1,group2,group3");
-        feed1.setLocations(new Locations());
-        Location location = new Location();
-        location.setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}-${DAY}/ad");
-        location.setType(LocationType.DATA);
-        feed1.getLocations().getLocations().add(location);
-        parser.parseAndValidate(feed1.toString());
-
-        feed1.getClusters().getClusters().get(0).getLocations().getLocations().set(0, location);
-        ConfigurationStore.get().publish(EntityType.FEED, feed1);
-
-        Feed feed2 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                (FeedEntityParserTest.class.getResourceAsStream(FEED_XML)));
-        feed2.setName("f2" + System.currentTimeMillis());
-        feed2.setGroups("group1,group2,group5");
-        feed2.setLocations(new Locations());
-        Location location2 = new Location();
-        location2
-                .setPath("/projects/bi/rmc/daily/ad/${YEAR}/fraud/${MONTH}/${HOUR}/ad");
-        location2.setType(LocationType.DATA);
-        feed2.getLocations().getLocations().add(location2);
-        feed2.getClusters().getClusters().get(0).getLocations().getLocations().set(0, location);
-        parser.parseAndValidate(feed2.toString());
-    }
-
-    @Test
-    public void testValidGroupNames() throws FalconException, JAXBException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("f1" + System.currentTimeMillis());
-        feed1.setGroups("group7,group8");
-        parser.parseAndValidate(feed1.toString());
-
-        feed1.setGroups("group7");
-        parser.parseAndValidate(feed1.toString());
-
-        feed1.setGroups(null);
-        parser.parseAndValidate(feed1.toString());
-        ConfigurationStore.get().publish(EntityType.FEED, feed1);
-    }
-
-    @Test
-    public void testInvalidGroupNames() throws FalconException, JAXBException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("f1" + System.currentTimeMillis());
-
-        try {
-            feed1.setGroups("commaend,");
-            parser.parseAndValidate(feed1.toString());
-            Assert.fail("Expected exception");
-        } catch (FalconException ignore) {
-            //ignore
-        }
-        try {
-            feed1.setGroups("group8,   group9");
-            parser.parseAndValidate(feed1.toString());
-            Assert.fail("Expected exception");
-        } catch (FalconException e) {
-            //ignore
-        }
-        try {
-            feed1.setGroups("space in group,group9");
-            parser.parseAndValidate(feed1.toString());
-            Assert.fail("Expected exception");
-        } catch (FalconException e) {
-            //ignore
-        }
-    }
-
-    @Test
-    public void testClusterPartitionExp() throws FalconException {
-        Cluster cluster = ConfigurationStore.get().get(EntityType.CLUSTER,
-                "testCluster");
-        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster,
-                "/*/${cluster.colo}"), "/*/" + cluster.getColo());
-        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster,
-                "/*/${cluster.name}/Local"), "/*/" + cluster.getName() + "/Local");
-        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster,
-                "/*/${cluster.field1}/Local"), "/*/value1/Local");
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testInvalidFeedName() throws JAXBException, FalconException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("Feed_name");
-        parser.parseAndValidate(feed1.toString());
-    }
-
-    @Test(expectedExceptions = FalconException.class)
-    public void testInvalidFeedGroupName() throws JAXBException, FalconException {
-        Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed1.setName("feed1");
-        feed1.getLocations().getLocations().get(0)
-                .setPath("/data/clicks/${YEAR}/${MONTH}/${DAY}/${HOUR}");
-        feed1.getClusters().getClusters().get(0).getLocations().getLocations()
-                .get(0).setPath("/data/clicks/${YEAR}/${MONTH}/${DAY}/${HOUR}");
-        ConfigurationStore.get().publish(EntityType.FEED, feed1);
-
-        Feed feed2 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-        feed2.setName("feed2");
-        feed2.getLocations().getLocations().get(0).setPath("/data/clicks/${YEAR}/${MONTH}/${DAY}/${HOUR}");
-        feed2.getClusters().getClusters().get(0).getLocations().getLocations()
-                .get(0).setPath("/data/clicks/${YEAR}/${MONTH}/${DAY}/${HOUR}");
-        feed2.setFrequency(new Frequency("hours(1)"));
-        try {
-            parser.parseAndValidate(feed2.toString());
-        } catch (FalconException e) {
-            e.printStackTrace();
-            Assert.fail("Not expecting exception for same frequency");
-        }
-        feed2.setFrequency(new Frequency("hours(2)"));
-        //expecting exception
-        parser.parseAndValidate(feed2.toString());
-    }
-
-    @Test
-    public void testNullFeedLateArrival() throws JAXBException, FalconException {
-        Feed feed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                FeedGroupMapTest.class
-                        .getResourceAsStream("/config/feed/feed-0.1.xml"));
-
-        feed.setLateArrival(null);
-        parser.parseAndValidate(feed.toString());
-
-    }
-
-    /**
-     * A negative test for validating the tags key-value pair regex: key=value, key=value.
-     * @throws FalconException
-     */
-    @Test
-    public void testFeedTags() throws FalconException {
-        try {
-            InputStream stream = this.getClass().getResourceAsStream("/config/feed/feed-tags-0.1.xml");
-            parser.parse(stream);
-            Assert.fail("org.xml.sax.SAXParseException should have been thrown.");
-        } catch (FalconException e) {
-            Assert.assertEquals(javax.xml.bind.UnmarshalException.class, e.getCause().getClass());
-            Assert.assertEquals(org.xml.sax.SAXParseException.class, e.getCause().getCause().getClass());
-        }
-    }
-
-    @Test
-    public void testParseFeedWithTable() throws FalconException {
-        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
-        Feed feedWithTable = parser.parse(inputStream);
-        Assert.assertEquals(feedWithTable.getTable().getUri(),
-                "catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}");
-    }
-
-    @Test (expectedExceptions = FalconException.class)
-    public void testParseInvalidFeedWithTable() throws FalconException {
-        parser.parse(FeedEntityParserTest.class.getResourceAsStream("/config/feed/invalid-feed.xml"));
-    }
-
-    @Test (expectedExceptions = FalconException.class)
-    public void testValidateFeedWithTableAndMultipleSources() throws FalconException {
-        parser.parseAndValidate(FeedEntityParserTest.class.getResourceAsStream(
-                "/config/feed/table-with-multiple-sources-feed.xml"));
-        Assert.fail("Should have thrown an exception:Multiple sources are not supported for feed with table storage");
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testValidatePartitionsForTable() throws Exception {
-        Feed feed = parser.parse(FeedEntityParserTest.class.getResourceAsStream("/config/feed/hive-table-feed.xml"));
-        Assert.assertNull(feed.getPartitions());
-
-        Partitions partitions = new Partitions();
-        Partition partition = new Partition();
-        partition.setName("colo");
-        partitions.getPartitions().add(partition);
-        feed.setPartitions(partitions);
-
-        parser.validate(feed);
-        Assert.fail("An exception should have been thrown:Partitions are not supported for feeds with table storage");
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testValidateClusterHasRegistryWithNoRegistryInterfaceEndPoint() throws Exception {
-        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
-        Feed feedWithTable = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.cluster.Cluster clusterEntity = EntityUtil.getEntity(EntityType.CLUSTER,
-                feedWithTable.getClusters().getClusters().get(0).getName());
-        ClusterHelper.getInterface(clusterEntity, Interfacetype.REGISTRY).setEndpoint(null);
-
-        parser.validate(feedWithTable);
-        Assert.fail("An exception should have been thrown: Cluster should have registry interface defined with table "
-                + "storage");
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testValidateClusterHasRegistryWithNoRegistryInterface() throws Exception {
-        Unmarshaller unmarshaller = EntityType.CLUSTER.getUnmarshaller();
-        Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass()
-                .getResourceAsStream(("/config/cluster/cluster-no-registry.xml")));
-        cluster.setName("badTestCluster");
-        ConfigurationStore.get().publish(EntityType.CLUSTER, cluster);
-
-
-        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
-        Feed feedWithTable = parser.parse(inputStream);
-        Validity validity = modifiableFeed.getClusters().getClusters().get(0)
-                .getValidity();
-        feedWithTable.getClusters().getClusters().clear();
-
-        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                new org.apache.falcon.entity.v0.feed.Cluster();
-        feedCluster.setName(cluster.getName());
-        feedCluster.setValidity(validity);
-        feedWithTable.getClusters().getClusters().add(feedCluster);
-
-        parser.validate(feedWithTable);
-        Assert.fail("An exception should have been thrown: Cluster should have registry interface defined with table"
-                + " storage");
-    }
-
-    @Test(expectedExceptions = ValidationException.class)
-    public void testValidateOwner() throws Exception {
-        CurrentUser.authenticate("unknown");
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        try {
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                    (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            feedEntityParser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test
-    public void testValidateACLWithACLAndAuthorizationDisabled() throws Exception {
-        InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-        Feed feed = parser.parse(stream);
-        Assert.assertNotNull(feed);
-        Assert.assertNotNull(feed.getACL());
-        Assert.assertNotNull(feed.getACL().getOwner());
-        Assert.assertNotNull(feed.getACL().getGroup());
-        Assert.assertNotNull(feed.getACL().getPermission());
-
-        parser.validate(feed);
-    }
-
-    @Test
-    public void testValidateACLOwner() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        CurrentUser.authenticate(USER);
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                    (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            Feed feed = feedEntityParser.parse(stream);
-
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            feed.getACL().setOwner(USER);
-            feed.getACL().setGroup(getPrimaryGroupName());
-
-            feedEntityParser.validate(feed);
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLBadOwner() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        CurrentUser.authenticate("blah");
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                    (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            Feed feed = feedEntityParser.parse(stream);
-
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            feedEntityParser.validate(feed);
-            Assert.fail("Validation exception should have been thrown for invalid owner");
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLBadOwnerAndGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-        CurrentUser.authenticate("blah");
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                    (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            Feed feed = feedEntityParser.parse(stream);
-
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            feedEntityParser.validate(feed);
-            Assert.fail("Validation exception should have been thrown for invalid owner");
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAndStorageBadOwner() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        Feed feed = null;
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                    (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            feed = feedEntityParser.parse(stream);
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            // create locations
-            createLocations(feed);
-            feedEntityParser.validate(feed);
-        } finally {
-            if (feed != null) {
-                deleteLocations(feed);
-            }
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAndStorageBadOwnerAndGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        Feed feed = null;
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                    (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            feed = feedEntityParser.parse(stream);
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            // create locations
-            createLocations(feed);
-            feedEntityParser.validate(feed);
-        } finally {
-            if (feed != null) {
-                deleteLocations(feed);
-            }
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAndStorageForValidOwnerBadGroup() throws Exception {
-        CurrentUser.authenticate(USER);
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        Feed feed = null;
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser = (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            feed = feedEntityParser.parse(stream);
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            feed.getACL().setOwner(USER);
-
-            // create locations
-            createLocations(feed);
-            feedEntityParser.validate(feed);
-        } finally {
-            if (feed != null) {
-                deleteLocations(feed);
-            }
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLValidGroupBadOwner() throws Exception {
-        CurrentUser.authenticate(USER);
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser = (FeedEntityParser) EntityParserFactory.getParser(
-                    EntityType.FEED);
-            Feed feed = feedEntityParser.parse(stream);
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            feed.getACL().setGroup(getPrimaryGroupName());
-
-            feedEntityParser.validate(feed);
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAndStorageForInvalidOwnerAndGroup() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        Feed feed = null;
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser = (FeedEntityParser) EntityParserFactory.getParser(
-                    EntityType.FEED);
-            feed = feedEntityParser.parse(stream);
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            // create locations
-            createLocations(feed);
-            feedEntityParser.validate(feed);
-        } finally {
-            if (feed != null) {
-                deleteLocations(feed);
-            }
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testValidateACLAndStorageForValidGroupBadOwner() throws Exception {
-        CurrentUser.authenticate(USER);
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        Feed feed = null;
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser = (FeedEntityParser) EntityParserFactory.getParser(
-                    EntityType.FEED);
-            feed = feedEntityParser.parse(stream);
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            Assert.assertNotNull(feed.getACL().getOwner());
-            Assert.assertNotNull(feed.getACL().getGroup());
-            Assert.assertNotNull(feed.getACL().getPermission());
-
-            feed.getACL().setGroup(getPrimaryGroupName());
-
-            // create locations
-            createLocations(feed);
-            feedEntityParser.validate(feed);
-        } finally {
-            if (feed != null) {
-                deleteLocations(feed);
-            }
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    private void createLocations(Feed feed) throws IOException {
-        for (Location location : feed.getLocations().getLocations()) {
-            if (location.getType() == LocationType.DATA) {
-                dfsCluster.getFileSystem().create(new Path(location.getPath()));
-                break;
-            }
-        }
-    }
-
-    private void deleteLocations(Feed feed) throws IOException {
-        for (Location location : feed.getLocations().getLocations()) {
-            if (location.getType() == LocationType.DATA) {
-                dfsCluster.getFileSystem().delete(new Path(location.getPath()), true);
-                break;
-            }
-        }
-    }
-
-    @Test
-    public void testValidateACLForArchiveReplication() throws Exception {
-        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
-        Assert.assertTrue(Boolean.valueOf(
-                StartupProperties.get().getProperty("falcon.security.authorization.enabled")));
-
-        CurrentUser.authenticate(USER);
-        try {
-            InputStream stream = this.getClass().getResourceAsStream(FEED_XML);
-
-            // need a new parser since it caches authorization enabled flag
-            FeedEntityParser feedEntityParser =
-                (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
-            Feed feed = feedEntityParser.parse(stream);
-
-            org.apache.falcon.entity.v0.feed.Cluster feedCluster =
-                FeedHelper.getCluster(feed, "backupCluster");
-            Location location = new Location();
-            location.setType(LocationType.DATA);
-            location.setPath(
-                "s3://falcontesting@hwxasvtesting.blob.core.windows.net/${YEAR}-${MONTH}-${DAY}-${HOUR}-${MINUTE}");
-            Locations locations = new Locations();
-            locations.getLocations().add(location);
-            feedCluster.setLocations(locations);
-
-            Assert.assertNotNull(feed);
-            Assert.assertNotNull(feed.getACL());
-            feed.getACL().setOwner(USER);
-            feed.getACL().setGroup(getPrimaryGroupName());
-
-            try {
-                feedEntityParser.validate(feed);
-            } catch (IllegalArgumentException e) {
-                // this is normal since the AWS Secret Access Key is not specified as the password of an s3 URL
-            }
-        } finally {
-            StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
-        }
-    }
-
-    @Test
-    public void testImportFeedSqoop() throws Exception {
-
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        InputStream feedStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed feed = parser.parseAndValidate(feedStream);
-        final org.apache.falcon.entity.v0.feed.Cluster srcCluster = feed.getClusters().getClusters().get(0);
-        Assert.assertEquals("test-hsql-db", FeedHelper.getImportDatasourceName(srcCluster));
-        Assert.assertEquals("customer", FeedHelper.getImportDataSourceTableName(srcCluster));
-        Assert.assertEquals(2, srcCluster.getImport().getSource().getFields().getIncludes().getFields().size());
-    }
-
-    @Test
-    public void testImportFeedSqoopMinimal() throws Exception {
-
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        InputStream feedStream = this.getClass().getResourceAsStream("/config/feed/feed-import-noargs-0.1.xml");
-        Feed feed = parser.parseAndValidate(feedStream);
-        final org.apache.falcon.entity.v0.feed.Cluster srcCluster = feed.getClusters().getClusters().get(0);
-        Assert.assertEquals("test-hsql-db", FeedHelper.getImportDatasourceName(srcCluster));
-        Assert.assertEquals("customer", FeedHelper.getImportDataSourceTableName(srcCluster));
-        Map<String, String> args = FeedHelper.getImportArguments(srcCluster);
-        Assert.assertEquals(0, args.size());
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testImportFeedSqoopExcludeFields() throws Exception {
-
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        InputStream feedStream = this.getClass().getResourceAsStream("/config/feed/feed-import-exclude-fields-0.1.xml");
-        Feed feed = parser.parseAndValidate(feedStream);
-        Assert.fail("An exception should have been thrown: Feed Import policy not yet implement Field exclusion.");
-    }
-
-    @Test
-    public void testImportFeedSqoopArgs() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed importFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Arguments args =
-                importFeed.getClusters().getClusters().get(0).getImport().getArguments();
-
-        Argument splitByArg = new Argument();
-        splitByArg.setName("--split-by");
-        splitByArg.setValue("id");
-
-        Argument numMappersArg = new Argument();
-        numMappersArg.setName("--num-mappers");
-        numMappersArg.setValue("3");
-
-        args.getArguments().clear();
-        args.getArguments().add(numMappersArg);
-        args.getArguments().add(splitByArg);
-
-        parser.validate(importFeed);
-    }
-
-    @Test
-    public void testImportFeedSqoopArgsSplitBy() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed importFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Arguments args =
-                importFeed.getClusters().getClusters().get(0).getImport().getArguments();
-        Argument splitByArg = new Argument();
-        splitByArg.setName("--split-by");
-        splitByArg.setValue("id");
-
-        args.getArguments().clear();
-        args.getArguments().add(splitByArg);
-
-        parser.validate(importFeed);
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testImportFeedSqoopArgsNumMapper() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed importFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Arguments args =
-                importFeed.getClusters().getClusters().get(0).getImport().getArguments();
-        Argument numMappersArg = new Argument();
-        numMappersArg.setName("--num-mappers");
-        numMappersArg.setValue("2");
-
-        args.getArguments().clear();
-        args.getArguments().add(numMappersArg);
-
-        parser.validate(importFeed);
-        Assert.fail("An exception should have been thrown: Feed Import should specify "
-                + "--split-by column along with --num-mappers");
-    }
-
-    @Test
-    public void testImportFeedExtractionType1() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed importFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Extract extract =
-                importFeed.getClusters().getClusters().get(0).getImport().getSource().getExtract();
-
-        extract.setType(ExtractMethod.FULL);
-        extract.setMergepolicy(MergeType.SNAPSHOT);
-
-        parser.validate(importFeed);
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testImportFeedExtractionType2() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed importFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Extract extract =
-                importFeed.getClusters().getClusters().get(0).getImport().getSource().getExtract();
-
-        extract.setType(ExtractMethod.FULL);
-        extract.setMergepolicy(MergeType.APPEND);
-
-        parser.validate(importFeed);
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testImportFeedExtractionType3() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-import-0.1.xml");
-        Feed importFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Extract extract =
-                importFeed.getClusters().getClusters().get(0).getImport().getSource().getExtract();
-
-        extract.setType(ExtractMethod.INCREMENTAL);
-        extract.setMergepolicy(MergeType.APPEND);
-
-        parser.validate(importFeed);
-    }
-
-    @Test (expectedExceptions = {ValidationException.class, FalconException.class})
-    public void testImportFeedSqoopInvalid() throws Exception {
-
-        InputStream feedStream = this.getClass().getResourceAsStream("/config/feed/feed-import-invalid-0.1.xml");
-        parser.parseAndValidate(feedStream);
-        Assert.fail("ValidationException should have been thrown");
-    }
-
-    @Test
-    public void testValidateEmailNotification() throws Exception {
-        Feed feedNotification = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(
-                (FeedEntityParserTest.class.getResourceAsStream(FEED_XML)));
-        Assert.assertNotNull(feedNotification.getNotification());
-        Assert.assertEquals(feedNotification.getNotification().getTo(), "falcon@localhost");
-        Assert.assertEquals(feedNotification.getNotification().getType(), "email");
-    }
-
-    @Test
-    public void testValidateFeedProperties() throws Exception {
-        FeedEntityParser feedEntityParser = Mockito
-                .spy((FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED));
-        InputStream stream = this.getClass().getResourceAsStream("/config/feed/feed-0.1.xml");
-        Feed feed = parser.parse(stream);
-
-        Mockito.doNothing().when(feedEntityParser).validateACL(feed);
-
-        // Good set of properties, should work
-        feedEntityParser.validate(feed);
-
-        // add duplicate property, should throw validation exception.
-        Property property1 = new Property();
-        property1.setName("field1");
-        property1.setValue("any value");
-        feed.getProperties().getProperties().add(property1);
-        try {
-            feedEntityParser.validate(feed);
-            Assert.fail(); // should not reach here
-        } catch (ValidationException e) {
-            // Do nothing
-        }
-
-        // Remove duplicate property. It should not throw exception anymore
-        feed.getProperties().getProperties().remove(property1);
-        feedEntityParser.validate(feed);
-
-        // add empty property name, should throw validation exception.
-        property1.setName("");
-        feed.getProperties().getProperties().add(property1);
-        try {
-            feedEntityParser.validate(feed);
-            Assert.fail(); // should not reach here
-        } catch (ValidationException e) {
-            // Do nothing
-        }
-    }
-
-    @Test
-    public void testFeedEndTimeOptional() throws Exception {
-        Feed feed = parser.parseAndValidate(ProcessEntityParserTest.class
-                .getResourceAsStream(FEED_XML));
-        feed.getClusters().getClusters().get(0).getValidity().setEnd(null);
-        parser.validate(feed);
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testExportFeedSqoopExcludeFields() throws Exception {
-
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        InputStream feedStream = this.getClass().getResourceAsStream("/config/feed/feed-export-exclude-fields-0.1.xml");
-        Feed feed = parser.parseAndValidate(feedStream);
-        Assert.fail("An exception should have been thrown: Feed Export policy not yet implement Field exclusion.");
-    }
-
-    @Test (expectedExceptions = ValidationException.class)
-    public void testExportFeedSqoopArgsNumMapper() throws Exception {
-        final InputStream inputStream = this.getClass().getResourceAsStream("/config/feed/feed-export-0.1.xml");
-        Feed exportFeed = parser.parse(inputStream);
-
-        org.apache.falcon.entity.v0.feed.Arguments args =
-                exportFeed.getClusters().getClusters().get(0).getExport().getArguments();
-        Argument numMappersArg = new Argument();
-        numMappersArg.setName("--split-by");
-        numMappersArg.setValue("id");
-
-        args.getArguments().clear();
-        args.getArguments().add(numMappersArg);
-
-        parser.validate(exportFeed);
-        Assert.fail("An exception should have been thrown: Feed export should specify --split-by");
-    }
-}
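
The ACL and storage tests removed above all follow one pattern: turn
falcon.security.authorization.enabled on, authenticate a user, obtain a fresh
FeedEntityParser (the previously obtained parser caches the authorization
flag), validate, and restore the flag in a finally block. A minimal sketch of
that pattern, assuming the same FEED_XML and USER fixtures used in the deleted
test class:

    StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
    CurrentUser.authenticate(USER);
    try {
        // a new parser is needed because the earlier one cached the authorization flag
        FeedEntityParser authParser =
                (FeedEntityParser) EntityParserFactory.getParser(EntityType.FEED);
        Feed feed = authParser.parse(getClass().getResourceAsStream(FEED_XML));
        feed.getACL().setOwner(USER);                  // valid owner/group: validation passes
        feed.getACL().setGroup(getPrimaryGroupName());
        authParser.validate(feed);                     // throws ValidationException for a bad owner/group
    } finally {
        StartupProperties.get().setProperty("falcon.security.authorization.enabled", "false");
    }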

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/test/java/org/apache/falcon/entity/parser/FeedUpdateTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/parser/FeedUpdateTest.java b/common/src/test/java/org/apache/falcon/entity/parser/FeedUpdateTest.java
deleted file mode 100644
index 3bf0b58..0000000
--- a/common/src/test/java/org/apache/falcon/entity/parser/FeedUpdateTest.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity.parser;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.entity.AbstractTestBase;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.hadoop.fs.Path;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-/**
- * Test for feed update helper methods.
- */
-public class FeedUpdateTest extends AbstractTestBase {
-
-    private final FeedEntityParser parser = (FeedEntityParser)
-            EntityParserFactory.getParser(EntityType.FEED);
-    private final ProcessEntityParser processParser = (ProcessEntityParser)
-            EntityParserFactory.getParser(EntityType.PROCESS);
-    private static final String FEED1_XML = "/config/feed/feed-0.2.xml";
-    private static final String PROCESS1_XML = "/config/process/process-0.2.xml";
-
-    @BeforeClass
-    public void init() throws Exception {
-        this.dfsCluster = EmbeddedCluster.newCluster("testCluster");
-        this.conf = dfsCluster.getConf();
-        setup();
-    }
-
-    @AfterClass
-    public void tearDown() {
-        this.dfsCluster.shutdown();
-    }
-
-    @Override
-    public void setup() throws Exception {
-        storeEntity(EntityType.CLUSTER, "testCluster");
-        storeEntity(EntityType.CLUSTER, "backupCluster");
-        storeEntity(EntityType.CLUSTER, "corp");
-        storeEntity(EntityType.FEED, "impressions");
-    }
-
-    @Test
-    public void testFeedUpdateWithNoDependentProcess() {
-        try {
-            parser.parseAndValidate(this.getClass()
-                    .getResourceAsStream(FEED_XML));
-        } catch (FalconException e) {
-            Assert.fail("Didn't expect feed parsing to fail", e);
-        }
-
-    }
-
-    @Test
-    public void testFeedUpdateWithOneDependentProcess() {
-        try {
-            ConfigurationStore.get().remove(EntityType.FEED, "clicks");
-            ConfigurationStore.get().remove(EntityType.PROCESS, "sample");
-            Feed feed = parser.parseAndValidate(this.getClass()
-                    .getResourceAsStream(FEED_XML));
-            ConfigurationStore.get().publish(EntityType.FEED, feed);
-            storeEntity(EntityType.PROCESS, "sample");
-
-            //Try parsing the same feed xml
-            parser.parseAndValidate(this.getClass().getResourceAsStream(FEED_XML));
-        } catch (Exception e) {
-            Assert.fail("Didn't expect feed parsing to fail", e);
-        }
-    }
-
-    @Test
-    public void testFeedUpdateWithMultipleDependentProcess() {
-        try {
-            ConfigurationStore.get().remove(EntityType.FEED, "clicks");
-            ConfigurationStore.get().remove(EntityType.PROCESS, "sample");
-            ConfigurationStore.get().remove(EntityType.PROCESS, "sample2");
-            ConfigurationStore.get().remove(EntityType.PROCESS, "sample3");
-            Feed feed = parser.parseAndValidate(this.getClass()
-                    .getResourceAsStream(FEED_XML));
-            ConfigurationStore.get().publish(EntityType.FEED, feed);
-            storeEntity(EntityType.PROCESS, "sample");
-            storeEntity(EntityType.PROCESS, "sample2");
-            storeEntity(EntityType.PROCESS, "sample3");
-
-            //Try parsing the same feed xml
-            parser.parseAndValidate(this.getClass()
-                    .getResourceAsStream(FEED_XML));
-        } catch (Exception e) {
-            Assert.fail("Didn't expect feed parsing to fail", e);
-        }
-    }
-
-    @Test
-    public void testFeedUpdateWithViolations() throws Exception {
-        ConfigurationStore.get().remove(EntityType.FEED, "clicks");
-        ConfigurationStore.get().remove(EntityType.PROCESS, "sample");
-        ConfigurationStore.get().remove(EntityType.PROCESS, "sample2");
-        storeEntity(EntityType.FEED, "impressionFeed");
-        storeEntity(EntityType.FEED, "imp-click-join1");
-        storeEntity(EntityType.FEED, "imp-click-join2");
-        Feed feed = parser.parseAndValidate(this.getClass()
-                .getResourceAsStream(FEED_XML));
-        ConfigurationStore.get().publish(EntityType.FEED, feed);
-
-        dfsCluster.getFileSystem().mkdirs(new Path("/falcon/test/workflow"));
-        Process process = processParser.parseAndValidate(this.getClass()
-                .getResourceAsStream(PROCESS1_XML));
-        ConfigurationStore.get().publish(EntityType.PROCESS, process);
-        Process p1 = (Process) process.copy();
-        p1.setName("sample2");
-        ConfigurationStore.get().publish(EntityType.PROCESS, p1);
-
-        try {
-            //Try parsing the same feed xml
-            parser.parseAndValidate(this.getClass()
-                    .getResourceAsStream(FEED1_XML));
-            Assert.fail("Expected feed parsing to fail");
-        } catch (ValidationException ignore) {
-            //ignore
-        }
-    }
-}


[42/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/ClusterHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/ClusterHelper.java b/common/src/main/java/org/apache/falcon/entity/ClusterHelper.java
deleted file mode 100644
index 41c9369..0000000
--- a/common/src/main/java/org/apache/falcon/entity/ClusterHelper.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.cluster.Interface;
-import org.apache.falcon.entity.v0.cluster.Interfacetype;
-import org.apache.falcon.entity.v0.cluster.Location;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Helper to get end points relating to the cluster.
- */
-public final class ClusterHelper {
-    public static final String DEFAULT_BROKER_IMPL_CLASS = "org.apache.activemq.ActiveMQConnectionFactory";
-    public static final String WORKINGDIR = "working";
-    public static final String NO_USER_BROKER_URL = "NA";
-
-
-
-    private ClusterHelper() {
-    }
-
-    public static Cluster getCluster(String cluster) throws FalconException {
-        return ConfigurationStore.get().get(EntityType.CLUSTER, cluster);
-    }
-
-    public static Configuration getConfiguration(Cluster cluster) {
-        Configuration conf = new Configuration();
-
-        final String storageUrl = getStorageUrl(cluster);
-        conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, storageUrl);
-
-        final String executeEndPoint = getMREndPoint(cluster);
-        conf.set(HadoopClientFactory.MR_JT_ADDRESS_KEY, executeEndPoint);
-        conf.set(HadoopClientFactory.YARN_RM_ADDRESS_KEY, executeEndPoint);
-
-        if (cluster.getProperties() != null) {
-            for (Property prop : cluster.getProperties().getProperties()) {
-                conf.set(prop.getName(), prop.getValue());
-            }
-        }
-
-        return conf;
-    }
-
-    public static String getOozieUrl(Cluster cluster) {
-        return getInterface(cluster, Interfacetype.WORKFLOW).getEndpoint();
-    }
-
-    public static String getStorageUrl(Cluster cluster) {
-        return getNormalizedUrl(cluster, Interfacetype.WRITE);
-    }
-
-    public static String getReadOnlyStorageUrl(Cluster cluster) {
-        return getNormalizedUrl(cluster, Interfacetype.READONLY);
-    }
-
-    public static String getMREndPoint(Cluster cluster) {
-        return getInterface(cluster, Interfacetype.EXECUTE).getEndpoint();
-    }
-
-    public static String getRegistryEndPoint(Cluster cluster) {
-        final Interface catalogInterface = getInterface(cluster, Interfacetype.REGISTRY);
-        return catalogInterface == null ? null : catalogInterface.getEndpoint();
-    }
-
-    public static String getMessageBrokerUrl(Cluster cluster) {
-        final Interface messageInterface = getInterface(cluster, Interfacetype.MESSAGING);
-        return messageInterface == null ? NO_USER_BROKER_URL : messageInterface.getEndpoint();
-    }
-
-    public static String getMessageBrokerImplClass(Cluster cluster) {
-        if (cluster.getProperties() != null) {
-            for (Property prop : cluster.getProperties().getProperties()) {
-                if (prop.getName().equals("brokerImplClass")) {
-                    return prop.getValue();
-                }
-            }
-        }
-        return DEFAULT_BROKER_IMPL_CLASS;
-    }
-
-    public static Interface getInterface(Cluster cluster, Interfacetype type) {
-        for (Interface interf : cluster.getInterfaces().getInterfaces()) {
-            if (interf.getType() == type) {
-                return interf;
-            }
-        }
-        return null;
-    }
-
-    private static String getNormalizedUrl(Cluster cluster, Interfacetype type) {
-        String normalizedUrl = getInterface(cluster, type).getEndpoint();
-        if (normalizedUrl.endsWith("///")){
-            return normalizedUrl;
-        }
-        String normalizedPath = new Path(normalizedUrl + "/").toString();
-        return normalizedPath.substring(0, normalizedPath.length() - 1);
-    }
-
-
-
-    public static Location getLocation(Cluster cluster, ClusterLocationType clusterLocationType) {
-        for (Location loc : cluster.getLocations().getLocations()) {
-            if (loc.getName().equals(clusterLocationType)) {
-                return loc;
-            }
-        }
-        //Mocking the working location FALCON-910
-        if (clusterLocationType.equals(ClusterLocationType.WORKING)) {
-            Location staging = getLocation(cluster, ClusterLocationType.STAGING);
-            if (staging != null) {
-                Location working = new Location();
-                working.setName(ClusterLocationType.WORKING);
-                working.setPath(staging.getPath().charAt(staging.getPath().length() - 1) == '/'
-                        ?
-                        staging.getPath().concat(WORKINGDIR)
-                        :
-                        staging.getPath().concat("/").concat(WORKINGDIR));
-                return working;
-            }
-        }
-        return null;
-    }
-
-    /**
-     * Parses the cluster entity and checks whether a working location is defined.
-     *
-     * @param cluster cluster entity to inspect
-     * @return true if a location of type WORKING is defined on the cluster
-     */
-    public static boolean checkWorkingLocationExists(Cluster cluster) {
-        for (Location loc : cluster.getLocations().getLocations()) {
-            if (loc.getName().equals(ClusterLocationType.WORKING)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    public static String getPropertyValue(Cluster cluster, String propName) {
-        if (cluster.getProperties() != null) {
-            for (Property prop : cluster.getProperties().getProperties()) {
-                if (prop.getName().equals(propName)) {
-                    return prop.getValue();
-                }
-            }
-        }
-        return null;
-    }
-
-    public static Map<String, String> getHiveProperties(Cluster cluster) {
-        if (cluster.getProperties() != null) {
-            List<Property> properties = cluster.getProperties().getProperties();
-            if (properties != null && !properties.isEmpty()) {
-                Map<String, String> hiveProperties = new HashMap<String, String>();
-                for (Property prop : properties) {
-                    if (prop.getName().startsWith("hive.")) {
-                        hiveProperties.put(prop.getName(), prop.getValue());
-                    }
-                }
-                return hiveProperties;
-            }
-        }
-        return null;
-    }
-}
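
For reference, a minimal usage sketch of the removed ClusterHelper; the
cluster name "testCluster" is only illustrative and assumes such a cluster
entity is already published in the ConfigurationStore:

    Cluster cluster = ClusterHelper.getCluster("testCluster");

    // copies the WRITE endpoint into fs.defaultFS, the EXECUTE endpoint into
    // the MR/YARN address keys, and any cluster-level properties
    Configuration conf = ClusterHelper.getConfiguration(cluster);

    String storageUrl  = ClusterHelper.getStorageUrl(cluster);        // WRITE interface, normalized
    String oozieUrl    = ClusterHelper.getOozieUrl(cluster);          // WORKFLOW interface
    String registryUrl = ClusterHelper.getRegistryEndPoint(cluster);  // null when no REGISTRY interface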

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/ColoClusterRelation.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/ColoClusterRelation.java b/common/src/main/java/org/apache/falcon/entity/ColoClusterRelation.java
deleted file mode 100644
index e4ca91b..0000000
--- a/common/src/main/java/org/apache/falcon/entity/ColoClusterRelation.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.service.ConfigurationChangeListener;
-
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Map of clusters in each colocation/datacenter.
- */
-public final class ColoClusterRelation implements ConfigurationChangeListener {
-    private static final ConcurrentHashMap<String, Set<String>> COLO_CLUSTER_MAP =
-        new ConcurrentHashMap<String, Set<String>>();
-    private static final ColoClusterRelation INSTANCE = new ColoClusterRelation();
-
-    private ColoClusterRelation() {
-    }
-
-    public static ColoClusterRelation get() {
-        return INSTANCE;
-    }
-
-    public Set<String> getClusters(String colo) {
-        if (COLO_CLUSTER_MAP.containsKey(colo)) {
-            return COLO_CLUSTER_MAP.get(colo);
-        }
-        return new HashSet<String>();
-    }
-
-    @Override
-    public void onAdd(Entity entity) {
-        if (entity.getEntityType() != EntityType.CLUSTER) {
-            return;
-        }
-
-        Cluster cluster = (Cluster) entity;
-        COLO_CLUSTER_MAP.putIfAbsent(cluster.getColo(), new HashSet<String>());
-        COLO_CLUSTER_MAP.get(cluster.getColo()).add(cluster.getName());
-    }
-
-    @Override
-    public void onRemove(Entity entity) {
-        if (entity.getEntityType() != EntityType.CLUSTER) {
-            return;
-        }
-
-        Cluster cluster = (Cluster) entity;
-        COLO_CLUSTER_MAP.get(cluster.getColo()).remove(cluster.getName());
-        if (COLO_CLUSTER_MAP.get(cluster.getColo()).isEmpty()) {
-            COLO_CLUSTER_MAP.remove(cluster.getColo());
-        }
-    }
-
-    @Override
-    public void onChange(Entity oldEntity, Entity newEntity) throws FalconException {
-        if (oldEntity.getEntityType() != EntityType.CLUSTER) {
-            return;
-        }
-        throw new FalconException("change shouldn't be supported on cluster!");
-    }
-
-    @Override
-    public void onReload(Entity entity) throws FalconException {
-        onAdd(entity);
-    }
-}
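
ColoClusterRelation implements ConfigurationChangeListener, so its colo-to-cluster
map is normally maintained through the onAdd/onRemove callbacks; the direct calls
below are only a sketch, and clusterEntity is a hypothetical, already-built
Cluster object:

    ColoClusterRelation relation = ColoClusterRelation.get();
    relation.onAdd(clusterEntity);       // records the cluster name under its colo
    Set<String> inColo = relation.getClusters(clusterEntity.getColo());

    relation.onRemove(clusterEntity);    // drops the name and removes the colo key once it is empty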

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/DatasourceHelper.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/DatasourceHelper.java b/common/src/main/java/org/apache/falcon/entity/DatasourceHelper.java
deleted file mode 100644
index 51ce898..0000000
--- a/common/src/main/java/org/apache/falcon/entity/DatasourceHelper.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.datasource.Credential;
-import org.apache.falcon.entity.v0.datasource.Credentialtype;
-import org.apache.falcon.entity.v0.datasource.Datasource;
-import org.apache.falcon.entity.v0.datasource.DatasourceType;
-import org.apache.falcon.entity.v0.datasource.Interface;
-import org.apache.falcon.entity.v0.datasource.Interfaces;
-import org.apache.falcon.entity.v0.datasource.Interfacetype;
-import org.apache.falcon.entity.v0.datasource.PasswordAliasType;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.security.CredentialProviderHelper;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringWriter;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-
-/**
- * DataSource entity helper methods.
- */
-
-public final class DatasourceHelper {
-
-    public static final String HADOOP_CREDENTIAL_PROVIDER_FILEPATH = "hadoop.security.credential.provider.path";
-
-    private static final Logger LOG = LoggerFactory.getLogger(DatasourceHelper.class);
-
-    private static final ConfigurationStore STORE = ConfigurationStore.get();
-
-    public static DatasourceType getDatasourceType(String datasourceName) throws FalconException {
-        return getDatasource(datasourceName).getType();
-    }
-
-    private DatasourceHelper() {}
-
-    public static Datasource getDatasource(String datasourceName) throws FalconException {
-        return STORE.get(EntityType.DATASOURCE, datasourceName);
-    }
-
-    public static String getReadOnlyEndpoint(Datasource datasource) {
-        return getInterface(datasource, Interfacetype.READONLY);
-    }
-
-    public static String getWriteEndpoint(Datasource datasource) {
-        return getInterface(datasource, Interfacetype.WRITE);
-    }
-
-    /**
-     * Returns user name and password pair as it is specified in the XML. If the credential type is
-     * password-file, the path name is returned.
-     *
-     * @param db
-     * @return Credential
-     * @throws FalconException
-     */
-
-    public static Credential getReadPasswordInfo(Datasource db) throws FalconException {
-        for (Interface ifs : db.getInterfaces().getInterfaces()) {
-            if ((ifs.getType() == Interfacetype.READONLY) && (ifs.getCredential() != null)) {
-                return ifs.getCredential();
-            }
-        }
-        return getDefaultPasswordInfo(db.getInterfaces());
-    }
-
-    public static Credential getWritePasswordInfo(Datasource db) throws FalconException {
-        for (Interface ifs : db.getInterfaces().getInterfaces()) {
-            if ((ifs.getType() == Interfacetype.WRITE) && (ifs.getCredential() != null)) {
-                return ifs.getCredential();
-            }
-        }
-        return getDefaultPasswordInfo(db.getInterfaces());
-    }
-
-    /**
-     * Returns user name and actual password pair. If the credential type is password-file, then the
-     * password is read from the HDFS file. If the credential type is password-text, the clear text
-     * password is returned.
-     *
-     * @param db
-     * @return
-     * @throws FalconException
-     */
-    public static java.util.Properties fetchReadPasswordInfo(Datasource db) throws FalconException {
-        Credential cred = getReadPasswordInfo(db);
-        return fetchPasswordInfo(cred);
-    }
-
-    public static java.util.Properties fetchWritePasswordInfo(Datasource db) throws FalconException {
-        Credential cred = getWritePasswordInfo(db);
-        return fetchPasswordInfo(cred);
-    }
-
-    public static java.util.Properties fetchPasswordInfo(Credential cred) throws FalconException {
-        java.util.Properties p = new java.util.Properties();
-        p.put("user", cred.getUserName());
-        if (cred.getType() == Credentialtype.PASSWORD_TEXT) {
-            p.put("password", cred.getPasswordText());
-        } else if (cred.getType() == Credentialtype.PASSWORD_FILE) {
-            String actualPasswd = fetchPasswordInfoFromFile(cred.getPasswordFile());
-            p.put("password", actualPasswd);
-        } else if (cred.getType() == Credentialtype.PASSWORD_ALIAS) {
-            String actualPasswd = fetchPasswordInfoFromCredentialStore(cred.getPasswordAlias());
-            p.put("password", actualPasswd);
-        }
-        return p;
-    }
-
-    public static String buildJceksProviderPath(URI credURI) {
-        StringBuilder sb = new StringBuilder();
-        final String credProviderPath = sb.append("jceks:").append("//")
-                .append(credURI.getScheme()).append("@")
-                .append(credURI.getHost())
-                .append(credURI.getPath()).toString();
-        return credProviderPath;
-    }
-
-    /**
-     * Return the Interface endpoint for the interface type specified in the argument.
-     *
-     * @param db
-     * @param type - can be read-only or write
-     * @return
-     */
-    private static String getInterface(Datasource db, Interfacetype type) {
-        for(Interface ifs : db.getInterfaces().getInterfaces()) {
-            if (ifs.getType() == type) {
-                return ifs.getEndpoint();
-            }
-        }
-        return null;
-    }
-
-    private static Credential getDefaultPasswordInfo(Interfaces ifs) throws FalconException {
-
-        if (ifs.getCredential() != null) {
-            return ifs.getCredential();
-        } else {
-            throw new FalconException("Missing Interfaces default credential");
-        }
-    }
-
-    private static String fetchPasswordInfoFromCredentialStore(final PasswordAliasType c) throws FalconException {
-        try {
-            final String credPath = c.getProviderPath();
-            final URI credURI = new URI(credPath);
-            if (StringUtils.isBlank(credURI.getScheme())
-                || StringUtils.isBlank(credURI.getHost())
-                || StringUtils.isBlank(credURI.getPath())) {
-                throw new FalconException("Password alias jceks provider HDFS path is incorrect.");
-            }
-            final String alias = c.getAlias();
-            if (StringUtils.isBlank(alias)) {
-                throw new FalconException("Password alias is empty.");
-            }
-
-            final String credProviderPath = buildJceksProviderPath(credURI);
-            LOG.info("Credential provider HDFS path : " + credProviderPath);
-
-            if (CredentialProviderHelper.isProviderAvailable()) {
-                UserGroupInformation ugi = CurrentUser.getProxyUGI();
-                String password = ugi.doAs(new PrivilegedExceptionAction<String>() {
-                    public String run() throws Exception {
-                        final Configuration conf = new Configuration();
-                        conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, credPath);
-                        conf.set(CredentialProviderHelper.CREDENTIAL_PROVIDER_PATH, credProviderPath);
-                        FileSystem fs = FileSystem.get(credURI, conf);
-                        if (!fs.exists(new Path(credPath))) {
-                            String msg = String.format("Credential provider hdfs path [%s] does not "
-                                   + "exist or access denied!", credPath);
-                            LOG.error(msg);
-                            throw new FalconException(msg);
-                        }
-                        return CredentialProviderHelper.resolveAlias(conf, alias);
-                    }
-                });
-                return password;
-            } else {
-                throw new FalconException("Credential Provider is not initialized");
-            }
-        } catch (Exception ioe) {
-            String msg = "Exception while trying to fetch credential alias";
-            LOG.error(msg, ioe);
-            throw new FalconException(msg, ioe);
-        }
-    }
-    private static String fetchPasswordInfoFromFile(String passwordFilePath) throws FalconException {
-        try {
-            Path path = new Path(passwordFilePath);
-            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(path.toUri());
-            if (!fs.exists(path)) {
-                throw new IOException("The password file does not exist! "
-                        + passwordFilePath);
-            }
-
-            if (!fs.isFile(path)) {
-                throw new IOException("The password file cannot be a directory! "
-                        + passwordFilePath);
-            }
-
-            InputStream is = fs.open(path);
-            StringWriter writer = new StringWriter();
-            try {
-                IOUtils.copy(is, writer);
-                return writer.toString();
-            } finally {
-                IOUtils.closeQuietly(is);
-                IOUtils.closeQuietly(writer);
-                fs.close();
-            }
-        } catch (IOException ioe) {
-            LOG.error("Error reading password file from HDFS : " + ioe);
-            throw new FalconException(ioe);
-        }
-    }
-}
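
The jceks provider path built above has the form "jceks://<scheme>@<host><path>". Below is a minimal standalone sketch, assuming an illustrative class name and sample HDFS URI (neither comes from the removed file), of the same composition performed by DatasourceHelper.buildJceksProviderPath:

import java.net.URI;

/** Illustrative sketch only: mirrors DatasourceHelper.buildJceksProviderPath. */
public class JceksPathSketch {

    static String buildJceksProviderPath(URI credURI) {
        // "jceks://" + scheme + "@" + host + path (the port, if any, is not carried over,
        // matching the original implementation)
        return new StringBuilder("jceks://")
                .append(credURI.getScheme()).append('@')
                .append(credURI.getHost())
                .append(credURI.getPath()).toString();
    }

    public static void main(String[] args) throws Exception {
        URI cred = new URI("hdfs://nn.example.com/user/falcon/db.password.jceks");
        // Prints: jceks://hdfs@nn.example.com/user/falcon/db.password.jceks
        System.out.println(buildJceksProviderPath(cred));
    }
}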

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/EntityNotRegisteredException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/EntityNotRegisteredException.java b/common/src/main/java/org/apache/falcon/entity/EntityNotRegisteredException.java
deleted file mode 100644
index 40f83e4..0000000
--- a/common/src/main/java/org/apache/falcon/entity/EntityNotRegisteredException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Exception thrown by Falcon when an entity is not already registered in the config store.
- */
-public class EntityNotRegisteredException extends FalconException {
-
-    public EntityNotRegisteredException(String message) {
-        super(message);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/EntityUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/EntityUtil.java b/common/src/main/java/org/apache/falcon/entity/EntityUtil.java
deleted file mode 100644
index 96befa1..0000000
--- a/common/src/main/java/org/apache/falcon/entity/EntityUtil.java
+++ /dev/null
@@ -1,1085 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.Pair;
-import org.apache.falcon.Tag;
-import org.apache.falcon.entity.WorkflowNameBuilder.WorkflowName;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityGraph;
-import org.apache.falcon.entity.v0.EntityNotification;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.entity.v0.cluster.Cluster;
-import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
-import org.apache.falcon.entity.v0.datasource.DatasourceType;
-import org.apache.falcon.entity.v0.cluster.Property;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.entity.v0.feed.Feed;
-import org.apache.falcon.entity.v0.process.LateInput;
-import org.apache.falcon.entity.v0.process.LateProcess;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.entity.v0.process.Process;
-import org.apache.falcon.entity.v0.process.Retry;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.util.DeploymentUtil;
-import org.apache.falcon.util.RuntimeProperties;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Calendar;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-
-/**
- * Helper methods for Falcon entity objects.
- */
-public final class EntityUtil {
-    public static final Logger LOG = LoggerFactory.getLogger(EntityUtil.class);
-
-    public static final String MR_QUEUE_NAME = "queueName";
-
-    private static final long MINUTE_IN_MS = 60 * 1000L;
-    private static final long HOUR_IN_MS = 60 * MINUTE_IN_MS;
-    private static final long DAY_IN_MS = 24 * HOUR_IN_MS;
-    private static final long MONTH_IN_MS = 31 * DAY_IN_MS;
-    private static final long ONE_MS = 1;
-    public static final String MR_JOB_PRIORITY = "jobPriority";
-
-    public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
-    public static final String WF_LIB_SEPARATOR = ",";
-    private static final String STAGING_DIR_NAME_SEPARATOR = "_";
-
-    /** Priority with which the DAG will be scheduled.
-     *  Matches the five priorities of Hadoop jobs.
-     */
-    public enum JOBPRIORITY {
-        VERY_HIGH((short) 1),
-        HIGH((short) 2),
-        NORMAL((short) 3),
-        LOW((short) 4),
-        VERY_LOW((short) 5);
-
-        private short priority;
-
-        public short getPriority() {
-            return priority;
-        }
-
-        JOBPRIORITY(short priority) {
-            this.priority = priority;
-        }
-    }
-
-    private EntityUtil() {}
-
-    public static <T extends Entity> T getEntity(EntityType type, String entityName) throws FalconException {
-        ConfigurationStore configStore = ConfigurationStore.get();
-        T entity = configStore.get(type, entityName);
-        if (entity == null) {
-            throw new EntityNotRegisteredException(entityName + " (" + type + ") not found");
-        }
-        return entity;
-    }
-
-    public static <T extends Entity> T getEntity(String type, String entityName) throws FalconException {
-        EntityType entityType;
-        try {
-            entityType = EntityType.getEnum(type);
-        } catch (IllegalArgumentException e) {
-            throw new FalconException("Invalid entity type: " + type, e);
-        }
-        return getEntity(entityType, entityName);
-    }
-
-    public static TimeZone getTimeZone(String tzId) {
-        if (tzId == null) {
-            throw new IllegalArgumentException("Invalid TimeZone: Cannot be null.");
-        }
-        TimeZone tz = TimeZone.getTimeZone(tzId);
-        if (!tzId.equals("GMT") && tz.getID().equals("GMT")) {
-            throw new IllegalArgumentException("Invalid TimeZone: " + tzId);
-        }
-        return tz;
-    }
-
-    public static Date getEndTime(Entity entity, String cluster) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            return getEndTime((Process) entity, cluster);
-        } else {
-            return getEndTime((Feed) entity, cluster);
-        }
-    }
-
-    public static Date parseDateUTC(String dateStr) throws FalconException {
-        try {
-            return SchemaHelper.parseDateUTC(dateStr);
-        } catch (Exception e) {
-            throw new FalconException(e);
-        }
-    }
-
-    public static Date getStartTime(Entity entity, String cluster) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            return getStartTime((Process) entity, cluster);
-        } else {
-            return getStartTime((Feed) entity, cluster);
-        }
-    }
-
-    public static Date getEndTime(Process process, String cluster) {
-        org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(process, cluster);
-        return processCluster.getValidity().getEnd();
-    }
-
-    public static Date getStartTime(Process process, String cluster) {
-        org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(process, cluster);
-        return processCluster.getValidity().getStart();
-    }
-
-    public static Date getEndTime(Feed feed, String cluster) {
-        org.apache.falcon.entity.v0.feed.Cluster clusterDef = FeedHelper.getCluster(feed, cluster);
-        return clusterDef.getValidity().getEnd();
-    }
-
-    public static Date getStartTime(Feed feed, String cluster) {
-        org.apache.falcon.entity.v0.feed.Cluster clusterDef = FeedHelper.getCluster(feed, cluster);
-        return clusterDef.getValidity().getStart();
-    }
-
-    public static int getParallel(Entity entity) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            return getParallel((Process) entity);
-        } else {
-            return getParallel((Feed) entity);
-        }
-    }
-
-    public static void setStartDate(Entity entity, String cluster, Date startDate) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            setStartDate((Process) entity, cluster, startDate);
-        } else {
-            setStartDate((Feed) entity, cluster, startDate);
-        }
-    }
-
-    public static void setEndTime(Entity entity, String cluster, Date endDate) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            setEndTime((Process) entity, cluster, endDate);
-        } else {
-            setEndTime((Feed) entity, cluster, endDate);
-        }
-    }
-
-    public static void setParallel(Entity entity, int parallel) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            setParallel((Process) entity, parallel);
-        } else {
-            setParallel((Feed) entity, parallel);
-        }
-    }
-
-    public static int getParallel(Process process) {
-        return process.getParallel();
-    }
-
-    public static void setStartDate(Process process, String cluster, Date startDate) {
-        org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(process, cluster);
-        processCluster.getValidity().setStart(startDate);
-    }
-
-    public static void setParallel(Process process, int parallel) {
-        process.setParallel(parallel);
-    }
-
-    public static void setEndTime(Process process, String cluster, Date endDate) {
-        org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(process, cluster);
-        processCluster.getValidity().setEnd(endDate);
-    }
-
-    public static int getParallel(Feed feed) {
-        // todo - how is this supposed to work?
-        return 1;
-    }
-
-    public static void setStartDate(Feed feed, String cluster, Date startDate) {
-        org.apache.falcon.entity.v0.feed.Cluster clusterDef = FeedHelper.getCluster(feed, cluster);
-        clusterDef.getValidity().setStart(startDate);
-    }
-
-    public static void setEndTime(Feed feed, String cluster, Date endDate) {
-        org.apache.falcon.entity.v0.feed.Cluster clusterDef = FeedHelper.getCluster(feed, cluster);
-        clusterDef.getValidity().setEnd(endDate);
-    }
-
-    public static void setParallel(Feed feed, int parallel) {
-    }
-
-    public static Frequency getFrequency(Entity entity) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            return getFrequency((Process) entity);
-        } else {
-            return getFrequency((Feed) entity);
-        }
-    }
-
-    public static Frequency getFrequency(Process process) {
-        return process.getFrequency();
-    }
-
-    public static Frequency getFrequency(Feed feed) {
-        return feed.getFrequency();
-    }
-
-    public static TimeZone getTimeZone(Entity entity) {
-        if (entity.getEntityType() == EntityType.PROCESS) {
-            return getTimeZone((Process) entity);
-        } else {
-            return getTimeZone((Feed) entity);
-        }
-    }
-
-    public static TimeZone getTimeZone(Process process) {
-        return process.getTimezone();
-    }
-
-    public static TimeZone getTimeZone(Feed feed) {
-        return feed.getTimezone();
-    }
-
-    /**
-     * Returns true if the given instanceTime is a valid instance time, based on the startTime and frequency of the
-     * entity.
-     *
-     * It does not check whether the instanceTime falls outside the validity of the entity.
-     * @param startTime startTime of the entity
-     * @param frequency frequency of the entity.
-     * @param timezone timezone of the entity.
-     * @param instanceTime instanceTime to be checked for validity
-     * @return
-     */
-    public static boolean isValidInstanceTime(Date startTime, Frequency frequency, TimeZone timezone,
-        Date instanceTime) {
-        Date next = getNextStartTime(startTime, frequency, timezone, instanceTime);
-        return next.equals(instanceTime);
-    }
-
-    public static Date getNextStartTime(Date startTime, Frequency frequency, TimeZone timezone, Date referenceTime) {
-        if (startTime.after(referenceTime)) {
-            return startTime;
-        }
-
-        Calendar startCal = Calendar.getInstance(timezone);
-        startCal.setTime(startTime);
-
-        int count = 0;
-        switch (frequency.getTimeUnit()) {
-        case months:
-            count = (int) ((referenceTime.getTime() - startTime.getTime()) / MONTH_IN_MS);
-            break;
-        case days:
-            count = (int) ((referenceTime.getTime() - startTime.getTime()) / DAY_IN_MS);
-            break;
-        case hours:
-            count = (int) ((referenceTime.getTime() - startTime.getTime()) / HOUR_IN_MS);
-            break;
-        case minutes:
-            count = (int) ((referenceTime.getTime() - startTime.getTime()) / MINUTE_IN_MS);
-            break;
-        default:
-        }
-
-        final int freq = frequency.getFrequencyAsInt();
-        if (count > 2) {
-            startCal.add(frequency.getTimeUnit().getCalendarUnit(), ((count - 2) / freq) * freq);
-        }
-        while (startCal.getTime().before(referenceTime)) {
-            startCal.add(frequency.getTimeUnit().getCalendarUnit(), freq);
-        }
-        return startCal.getTime();
-    }
-
-
-    public static Properties getEntityProperties(Entity myEntity) {
-        Properties properties = new Properties();
-        switch (myEntity.getEntityType()) {
-        case CLUSTER:
-            org.apache.falcon.entity.v0.cluster.Properties clusterProps = ((Cluster) myEntity).getProperties();
-            if (clusterProps != null) {
-                for (Property prop : clusterProps.getProperties()) {
-                    properties.put(prop.getName(), prop.getValue());
-                }
-            }
-            break;
-
-        case FEED:
-            org.apache.falcon.entity.v0.feed.Properties feedProps = ((Feed) myEntity).getProperties();
-            if (feedProps != null) {
-                for (org.apache.falcon.entity.v0.feed.Property prop : feedProps.getProperties()) {
-                    properties.put(prop.getName(), prop.getValue());
-                }
-            }
-            break;
-
-        case PROCESS:
-            org.apache.falcon.entity.v0.process.Properties processProps = ((Process) myEntity).getProperties();
-            if (processProps != null) {
-                for (org.apache.falcon.entity.v0.process.Property prop : processProps.getProperties()) {
-                    properties.put(prop.getName(), prop.getValue());
-                }
-            }
-            break;
-
-        default:
-            throw new IllegalArgumentException("Unhandled entity type " + myEntity.getEntityType());
-        }
-        return properties;
-    }
-
-
-    public static int getInstanceSequence(Date startTime, Frequency frequency, TimeZone tz, Date instanceTime) {
-        if (startTime.after(instanceTime)) {
-            return -1;
-        }
-
-        if (tz == null) {
-            tz = TimeZone.getTimeZone("UTC");
-        }
-
-        Calendar startCal = Calendar.getInstance(tz);
-        startCal.setTime(startTime);
-
-        int count = 0;
-        switch (frequency.getTimeUnit()) {
-        case months:
-            count = (int) ((instanceTime.getTime() - startTime.getTime()) / MONTH_IN_MS);
-            break;
-        case days:
-            count = (int) ((instanceTime.getTime() - startTime.getTime()) / DAY_IN_MS);
-            break;
-        case hours:
-            count = (int) ((instanceTime.getTime() - startTime.getTime()) / HOUR_IN_MS);
-            break;
-        case minutes:
-            count = (int) ((instanceTime.getTime() - startTime.getTime()) / MINUTE_IN_MS);
-            break;
-        default:
-        }
-
-        final int freq = frequency.getFrequencyAsInt();
-        if (count > 2) {
-            startCal.add(frequency.getTimeUnit().getCalendarUnit(), (count / freq) * freq);
-            count = (count / freq);
-        } else {
-            count = 0;
-        }
-        while (startCal.getTime().before(instanceTime)) {
-            startCal.add(frequency.getTimeUnit().getCalendarUnit(), freq);
-            count++;
-        }
-        return count + 1;
-    }
-
-    public static Date getNextInstanceTime(Date instanceTime, Frequency frequency, TimeZone tz, int instanceCount) {
-        if (tz == null) {
-            tz = TimeZone.getTimeZone("UTC");
-        }
-        Calendar insCal = Calendar.getInstance(tz);
-        insCal.setTime(instanceTime);
-
-        final int freq = frequency.getFrequencyAsInt() * instanceCount;
-        insCal.add(frequency.getTimeUnit().getCalendarUnit(), freq);
-
-        return insCal.getTime();
-    }
-
-    public static String md5(Entity entity) throws FalconException {
-        return new String(Hex.encodeHex(DigestUtils.md5(stringOf(entity))));
-    }
-
-    public static boolean equals(Entity lhs, Entity rhs) throws FalconException {
-        return equals(lhs, rhs, null);
-    }
-
-    public static boolean equals(Entity lhs, Entity rhs, String[] filterProps) throws FalconException {
-        if (lhs == null && rhs == null) {
-            return true;
-        }
-        if (lhs == null || rhs == null) {
-            return false;
-        }
-
-        if (lhs.equals(rhs)) {
-            String lhsString = stringOf(lhs, filterProps);
-            String rhsString = stringOf(rhs, filterProps);
-            return lhsString.equals(rhsString);
-        } else {
-            return false;
-        }
-    }
-
-    public static String stringOf(Entity entity) throws FalconException {
-        return stringOf(entity, null);
-    }
-
-    private static String stringOf(Entity entity, String[] filterProps) throws FalconException {
-        Map<String, String> map = new HashMap<String, String>();
-        mapToProperties(entity, null, map, filterProps);
-        List<String> keyList = new ArrayList<String>(map.keySet());
-        Collections.sort(keyList);
-        StringBuilder builder = new StringBuilder();
-        for (String key : keyList) {
-            builder.append(key).append('=').append(map.get(key)).append('\n');
-        }
-        return builder.toString();
-    }
-
-    @SuppressWarnings("rawtypes")
-    private static void mapToProperties(Object obj, String name, Map<String, String> propMap, String[] filterProps)
-        throws FalconException {
-
-        if (obj == null) {
-            return;
-        }
-
-        if (filterProps != null && name != null) {
-            for (String filter : filterProps) {
-                if (name.matches(filter.replace(".", "\\.").replace("[", "\\[").replace("]", "\\]"))) {
-                    return;
-                }
-            }
-        }
-
-        if (Date.class.isAssignableFrom(obj.getClass())) {
-            propMap.put(name, SchemaHelper.formatDateUTC((Date) obj));
-        } else if (obj.getClass().getPackage().getName().equals("java.lang")) {
-            propMap.put(name, String.valueOf(obj));
-        } else if (TimeZone.class.isAssignableFrom(obj.getClass())) {
-            propMap.put(name, ((TimeZone) obj).getID());
-        } else if (Enum.class.isAssignableFrom(obj.getClass())) {
-            propMap.put(name, ((Enum) obj).name());
-        } else if (List.class.isAssignableFrom(obj.getClass())) {
-            List list = (List) obj;
-            for (int index = 0; index < list.size(); index++) {
-                mapToProperties(list.get(index), name + "[" + index + "]", propMap, filterProps);
-            }
-        } else {
-            try {
-                Method method = obj.getClass().getDeclaredMethod("toString");
-                propMap.put(name, (String) method.invoke(obj));
-            } catch (NoSuchMethodException e) {
-                try {
-                    Map map = PropertyUtils.describe(obj);
-                    for (Object entry : map.entrySet()) {
-                        String key = (String)((Map.Entry)entry).getKey();
-                        if (!key.equals("class")) {
-                            mapToProperties(map.get(key), name != null ? name + "." + key : key, propMap,
-                                    filterProps);
-                        } else {
-                            // Just add the parent element to the list too.
-                            // Required to detect addition/removal of optional elements with child nodes.
-                            // For example, late-process
-                            propMap.put(((Class)map.get(key)).getSimpleName(), "");
-                        }
-                    }
-                } catch (Exception e1) {
-                    throw new FalconException(e1);
-                }
-            } catch (Exception e) {
-                throw new FalconException(e);
-            }
-        }
-    }
-
-    public static WorkflowName getWorkflowName(Tag tag, List<String> suffixes,
-                                               Entity entity) {
-        WorkflowNameBuilder<Entity> builder = new WorkflowNameBuilder<Entity>(
-                entity);
-        builder.setTag(tag);
-        builder.setSuffixes(suffixes);
-        return builder.getWorkflowName();
-    }
-
-    public static WorkflowName getWorkflowName(Tag tag, Entity entity) {
-        return getWorkflowName(tag, null, entity);
-    }
-
-    public static WorkflowName getWorkflowName(Entity entity) {
-        return getWorkflowName(null, null, entity);
-    }
-
-    public static String getWorkflowNameSuffix(String workflowName,
-                                               Entity entity) throws FalconException {
-        WorkflowNameBuilder<Entity> builder = new WorkflowNameBuilder<Entity>(
-                entity);
-        return builder.getWorkflowSuffixes(workflowName).replaceAll("_", "");
-    }
-
-    public static Tag getWorkflowNameTag(String workflowName, Entity entity) {
-        WorkflowNameBuilder<Entity> builder = new WorkflowNameBuilder<Entity>(
-                entity);
-        return builder.getWorkflowTag(workflowName);
-    }
-
-    public static List<String> getWorkflowNames(Entity entity) {
-        switch(entity.getEntityType()) {
-        case FEED:
-            return Arrays.asList(getWorkflowName(Tag.RETENTION, entity).toString(),
-                getWorkflowName(Tag.REPLICATION, entity).toString());
-
-        case PROCESS:
-            return Arrays.asList(getWorkflowName(Tag.DEFAULT, entity).toString());
-
-        default:
-        }
-        throw new IllegalArgumentException("Unhandled type: " + entity.getEntityType());
-    }
-
-    public static <T extends Entity> T getClusterView(T entity, String clusterName) {
-        switch (entity.getEntityType()) {
-        case CLUSTER:
-            return entity;
-
-        case FEED:
-            Feed feed = (Feed) entity.copy();
-            org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, clusterName);
-            Iterator<org.apache.falcon.entity.v0.feed.Cluster> itr = feed.getClusters().getClusters().iterator();
-            while (itr.hasNext()) {
-                org.apache.falcon.entity.v0.feed.Cluster cluster = itr.next();
-                // In addition to retaining the required cluster, also retain the source clusters if the required
-                // cluster is the target cluster.
-                // 1. Retain a cluster if its name matches the requested cluster name.
-                if (!(cluster.getName().equals(clusterName)
-                        || (feedCluster.getType() == ClusterType.TARGET
-                        && cluster.getType() == ClusterType.SOURCE))) {
-                    itr.remove();
-                }
-            }
-            return (T) feed;
-
-        case PROCESS:
-            Process process = (Process) entity.copy();
-            Iterator<org.apache.falcon.entity.v0.process.Cluster> procItr =
-                process.getClusters().getClusters().iterator();
-            while (procItr.hasNext()) {
-                org.apache.falcon.entity.v0.process.Cluster cluster = procItr.next();
-                if (!cluster.getName().equals(clusterName)) {
-                    procItr.remove();
-                }
-            }
-            return (T) process;
-        default:
-        }
-        throw new UnsupportedOperationException("Not supported for entity type " + entity.getEntityType());
-    }
-
-    public static Set<String> getClustersDefined(Entity entity) {
-        Set<String> clusters = new HashSet<String>();
-        switch (entity.getEntityType()) {
-        case CLUSTER:
-            clusters.add(entity.getName());
-            break;
-
-        case FEED:
-            Feed feed = (Feed) entity;
-            for (org.apache.falcon.entity.v0.feed.Cluster cluster : feed.getClusters().getClusters()) {
-                clusters.add(cluster.getName());
-            }
-            break;
-
-        case PROCESS:
-            Process process = (Process) entity;
-            for (org.apache.falcon.entity.v0.process.Cluster cluster : process.getClusters().getClusters()) {
-                clusters.add(cluster.getName());
-            }
-            break;
-        default:
-        }
-        return clusters;
-    }
-
-    public static Set<String> getClustersDefinedInColos(Entity entity) {
-        Set<String> entityClusters = EntityUtil.getClustersDefined(entity);
-        if (DeploymentUtil.isEmbeddedMode()) {
-            return entityClusters;
-        }
-
-        Set<String> myClusters = DeploymentUtil.getCurrentClusters();
-        Set<String> applicableClusters = new HashSet<String>();
-        for (String cluster : entityClusters) {
-            if (myClusters.contains(cluster)) {
-                applicableClusters.add(cluster);
-            }
-        }
-        return applicableClusters;
-    }
-
-    public static Retry getRetry(Entity entity) throws FalconException {
-        switch (entity.getEntityType()) {
-        case FEED:
-            if (!RuntimeProperties.get()
-                    .getProperty("feed.retry.allowed", "true")
-                    .equalsIgnoreCase("true")) {
-                return null;
-            }
-            Retry retry = new Retry();
-            retry.setAttempts(Integer.parseInt(RuntimeProperties.get()
-                    .getProperty("feed.retry.attempts", "3")));
-            retry.setDelay(new Frequency(RuntimeProperties.get().getProperty(
-                    "feed.retry.frequency", "minutes(5)")));
-            retry.setPolicy(PolicyType.fromValue(RuntimeProperties.get()
-                    .getProperty("feed.retry.policy", "exp-backoff")));
-            retry.setOnTimeout(Boolean.valueOf(RuntimeProperties.get().getProperty("feed.retry.onTimeout", "false")));
-            return retry;
-        case PROCESS:
-            Process process = (Process) entity;
-            return process.getRetry();
-        default:
-            throw new FalconException("Cannot create Retry for entity:" + entity.getName());
-        }
-    }
-
-    //Staging path that stores scheduler configs like oozie coord/bundle xmls, parent workflow xml
-    //Each entity update creates a new staging path
-    //Base staging path is the base path for all staging dirs
-    public static Path getBaseStagingPath(Cluster cluster, Entity entity) {
-        return new Path(ClusterHelper.getLocation(cluster, ClusterLocationType.STAGING).getPath(),
-                "falcon/workflows/" + entity.getEntityType().name().toLowerCase() + "/" + entity.getName());
-    }
-
-    /**
-     * Gets the latest staging path for an entity on a cluster, based on the directory name (which contains a timestamp).
-     * @param cluster
-     * @param entity
-     * @return
-     * @throws FalconException
-     */
-    public static Path getLatestStagingPath(org.apache.falcon.entity.v0.cluster.Cluster cluster, final Entity entity)
-        throws FalconException {
-        Path basePath = getBaseStagingPath(cluster, entity);
-        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(
-                ClusterHelper.getConfiguration(cluster));
-        try {
-            final String md5 = md5(getClusterView(entity, cluster.getName()));
-            FileStatus[] files = fs.listStatus(basePath, new PathFilter() {
-                @Override
-                public boolean accept(Path path) {
-                    return path.getName().startsWith(md5);
-                }
-            });
-            if (files != null && files.length != 0) {
-                // Find the latest directory using the timestamp used in the dir name
-                // These files will vary only in ts suffix (as we have filtered out using a common md5 hash),
-                // hence, sorting will be on timestamp.
-                // FileStatus compares on Path and hence the latest will be at the end after sorting.
-                Arrays.sort(files);
-                return files[files.length - 1].getPath();
-            }
-            throw new FalconException("No staging directories found for entity " + entity.getName() + " on cluster "
-                + cluster.getName());
-        } catch (Exception e) {
-            throw new FalconException("Unable to get listing for " + basePath.toString(), e);
-        }
-    }
-
-    //Creates a new staging path for entity schedule/update.
-    //The staging path contains the md5 of the entity's cluster view; this is used to check whether an update is required.
-    public static Path getNewStagingPath(Cluster cluster, Entity entity)
-        throws FalconException {
-        Entity clusterView = getClusterView(entity, cluster.getName());
-        return new Path(getBaseStagingPath(cluster, entity),
-            md5(clusterView) + STAGING_DIR_NAME_SEPARATOR + String.valueOf(System.currentTimeMillis()));
-    }
-
-    // Given an entity and a cluster, determines if the supplied path is the staging path for that entity.
-    public static boolean isStagingPath(Cluster cluster,
-                                        Entity entity, Path path) throws FalconException {
-        String basePath = new Path(ClusterHelper.getLocation(cluster, ClusterLocationType.STAGING)
-                .getPath()).toUri().getPath();
-        try {
-            FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(
-                    ClusterHelper.getConfiguration(cluster));
-            String pathString = path.toUri().getPath();
-            String entityPath = entity.getEntityType().name().toLowerCase() + "/" + entity.getName();
-            return fs.exists(path) && pathString.startsWith(basePath) && pathString.contains(entityPath);
-        } catch (IOException e) {
-            throw new FalconException(e);
-        }
-    }
-
-    public static LateProcess getLateProcess(Entity entity)
-        throws FalconException {
-
-        switch (entity.getEntityType()) {
-        case FEED:
-            if (!RuntimeProperties.get().getProperty("feed.late.allowed", "true").equalsIgnoreCase("true")) {
-                return null;
-            }
-
-            //If late Arrival is not configured do not process further
-            if (((Feed) entity).getLateArrival() == null){
-                return null;
-            }
-
-            LateProcess lateProcess = new LateProcess();
-            lateProcess.setDelay(new Frequency(RuntimeProperties.get().getProperty("feed.late.frequency", "hours(3)")));
-            lateProcess.setPolicy(
-                    PolicyType.fromValue(RuntimeProperties.get().getProperty("feed.late.policy", "exp-backoff")));
-            LateInput lateInput = new LateInput();
-            lateInput.setInput(entity.getName());
-            //TODO - Assuming the late workflow is not used
-            lateInput.setWorkflowPath("ignore.xml");
-            lateProcess.getLateInputs().add(lateInput);
-            return lateProcess;
-        case PROCESS:
-            Process process = (Process) entity;
-            return process.getLateProcess();
-        default:
-            throw new FalconException("Cannot create Late Process for entity:" + entity.getName());
-        }
-    }
-
-    public static Path getLogPath(Cluster cluster, Entity entity) {
-        return new Path(getBaseStagingPath(cluster, entity), "logs");
-    }
-
-    public static String fromUTCtoURIDate(String utc) throws FalconException {
-        DateFormat utcFormat = new SimpleDateFormat(
-                "yyyy'-'MM'-'dd'T'HH':'mm'Z'");
-        Date utcDate;
-        try {
-            utcDate = utcFormat.parse(utc);
-        } catch (ParseException e) {
-            throw new FalconException("Unable to parse utc date:", e);
-        }
-        DateFormat uriFormat = new SimpleDateFormat("yyyy'-'MM'-'dd'-'HH'-'mm");
-        return uriFormat.format(utcDate);
-    }
-
-    public static boolean responsibleFor(String colo) {
-        return DeploymentUtil.isEmbeddedMode() || (!DeploymentUtil.isPrism()
-                && colo.equals(DeploymentUtil.getCurrentColo()));
-    }
-
-    public static Date getNextStartTime(Entity entity, Cluster cluster, Date effectiveTime) {
-        switch(entity.getEntityType()) {
-        case FEED:
-            Feed feed = (Feed) entity;
-            org.apache.falcon.entity.v0.feed.Cluster feedCluster = FeedHelper.getCluster(feed, cluster.getName());
-            return getNextStartTime(feedCluster.getValidity().getStart(), feed.getFrequency(), feed.getTimezone(),
-                effectiveTime);
-
-        case PROCESS:
-            Process process = (Process) entity;
-            org.apache.falcon.entity.v0.process.Cluster processCluster = ProcessHelper.getCluster(process,
-                cluster.getName());
-            return getNextStartTime(processCluster.getValidity().getStart(), process.getFrequency(),
-                process.getTimezone(), effectiveTime);
-
-        default:
-        }
-
-        throw new IllegalArgumentException("Unhandled type: " + entity.getEntityType());
-    }
-
-    public static boolean isTableStorageType(Cluster cluster, Entity entity) throws FalconException {
-        return entity.getEntityType() == EntityType.PROCESS
-            ? isTableStorageType(cluster, (Process) entity) : isTableStorageType(cluster, (Feed) entity);
-    }
-
-    public static boolean isTableStorageType(Cluster cluster, Feed feed) throws FalconException {
-        Storage.TYPE storageType = FeedHelper.getStorageType(feed, cluster);
-        return Storage.TYPE.TABLE == storageType;
-    }
-
-    public static boolean isTableStorageType(Cluster cluster, Process process) throws FalconException {
-        Storage.TYPE storageType = ProcessHelper.getStorageType(cluster, process);
-        return Storage.TYPE.TABLE == storageType;
-    }
-
-    public static List<String> getTags(Entity entity) {
-        String rawTags = null;
-
-        switch (entity.getEntityType()) {
-        case PROCESS:
-            rawTags = ((Process) entity).getTags();
-            break;
-
-        case FEED:
-            rawTags = ((Feed) entity).getTags();
-            break;
-
-        case CLUSTER:
-            rawTags = ((Cluster) entity).getTags();
-            break;
-
-        default:
-            break;
-        }
-
-        List<String> tags = new ArrayList<String>();
-        if (!StringUtils.isEmpty(rawTags)) {
-            for(String tag : rawTags.split(",")) {
-                tags.add(tag.trim());
-            }
-        }
-
-        return tags;
-    }
-
-    public static List<String> getPipelines(Entity entity) {
-        List<String> pipelines = new ArrayList<String>();
-
-        if (entity.getEntityType().equals(EntityType.PROCESS)) {
-            Process process = (Process) entity;
-            String pipelineString = process.getPipelines();
-            if (pipelineString != null) {
-                for (String pipeline : pipelineString.split(",")) {
-                    pipelines.add(pipeline.trim());
-                }
-            }
-        } // else : Pipelines are only set for Process entities
-
-        return pipelines;
-    }
-
-    public static EntityList getEntityDependencies(Entity entity) throws FalconException {
-        Set<Entity> dependents = EntityGraph.get().getDependents(entity);
-        Entity[] dependentEntities = dependents.toArray(new Entity[dependents.size()]);
-        return new EntityList(dependentEntities, entity);
-    }
-
-    public static Pair<Date, Date> getEntityStartEndDates(Entity entityObject) {
-        Set<String> clusters = EntityUtil.getClustersDefined(entityObject);
-        Pair<Date, String> clusterMinStartDate = null;
-        Pair<Date, String> clusterMaxEndDate = null;
-        for (String cluster : clusters) {
-            if (clusterMinStartDate == null || clusterMinStartDate.first.after(getStartTime(entityObject, cluster))) {
-                clusterMinStartDate = Pair.of(getStartTime(entityObject, cluster), cluster);
-            }
-            if (clusterMaxEndDate == null || clusterMaxEndDate.first.before(getEndTime(entityObject, cluster))) {
-                clusterMaxEndDate = Pair.of(getEndTime(entityObject, cluster), cluster);
-            }
-        }
-        return new Pair<Date, Date>(clusterMinStartDate.first, clusterMaxEndDate.first);
-    }
-
-    /**
-     * Returns the previous instance time (before or on) for a given referenceTime.
-     *
-     * Example: For a feed in "UTC" with startDate "2014-01-01 00:00" and a frequency of "days(1)", a referenceTime
-     * of "2015-01-01 00:00" will return "2015-01-01 00:00".
-     *
-     * Similarly, for the above feed, a referenceTime of "2015-01-01 04:00" will also result in
-     * "2015-01-01 00:00".
-     *
-     * @param startTime start time of the entity
-     * @param frequency frequency of the entity
-     * @param tz timezone of the entity
-     * @param referenceTime time before which the instanceTime is desired
-     * @return  instance(before or on) the referenceTime
-     */
-    public static Date getPreviousInstanceTime(Date startTime, Frequency frequency, TimeZone tz, Date referenceTime) {
-        if (tz == null) {
-            tz = TimeZone.getTimeZone("UTC");
-        }
-        Calendar insCal = Calendar.getInstance(tz);
-        insCal.setTime(startTime);
-
-        int instanceCount = getInstanceSequence(startTime, frequency, tz, referenceTime) - 1;
-        final int freq = frequency.getFrequencyAsInt() * instanceCount;
-        insCal.add(frequency.getTimeUnit().getCalendarUnit(), freq);
-
-        while (insCal.getTime().after(referenceTime)) {
-            insCal.add(frequency.getTimeUnit().getCalendarUnit(), frequency.getFrequencyAsInt() * -1);
-        }
-        return insCal.getTime();
-    }
-
-    /**
-     * Find the times at which the given entity will run in a given time range.
-     * <p/>
-     * Both start and end Date are inclusive.
-     *
-     * @param entity      feed or process entity whose instance times are to be found
-     * @param clusterName name of the cluster
-     * @param startRange  start time for the input range
-     * @param endRange    end time for the input range
-     * @return List of instance times at which the entity will run in the given time range
-     */
-    public static List<Date> getEntityInstanceTimes(Entity entity, String clusterName, Date startRange, Date endRange) {
-        Date start = null;
-        switch (entity.getEntityType()) {
-
-        case FEED:
-            Feed feed = (Feed) entity;
-            start = FeedHelper.getCluster(feed, clusterName).getValidity().getStart();
-            return getInstanceTimes(start, feed.getFrequency(), feed.getTimezone(),
-                    startRange, endRange);
-
-        case PROCESS:
-            Process process = (Process) entity;
-            start = ProcessHelper.getCluster(process, clusterName).getValidity().getStart();
-            return getInstanceTimes(start, process.getFrequency(),
-                    process.getTimezone(), startRange, endRange);
-
-        default:
-            throw new IllegalArgumentException("Unhandled type: " + entity.getEntityType());
-        }
-    }
-
-
-    /**
-     * Finds instance times, given the start time of the first instance and the frequency, up to a given end time.
-     *
-     * It finds the first valid instance time in the given time range and then uses the frequency to find subsequent
-     * instances within that range.
-     *
-     * @param startTime startTime of the entity (time of first instance ever of the given entity)
-     * @param frequency frequency of the entity
-     * @param timeZone  timeZone of the entity
-     * @param startRange start time for the input range of interest.
-     * @param endRange end time for the input range of interest.
-     * @return list of instance run times of the given entity in the given time range.
-     */
-    public static List<Date> getInstanceTimes(Date startTime, Frequency frequency, TimeZone timeZone,
-                                              Date startRange, Date endRange) {
-        List<Date> result = new LinkedList<>();
-        if (timeZone == null) {
-            timeZone = TimeZone.getTimeZone("UTC");
-        }
-
-        Date current = getPreviousInstanceTime(startTime, frequency, timeZone, startRange);
-        while (true) {
-            Date nextStartTime = getNextStartTime(startTime, frequency, timeZone, current);
-            if (nextStartTime.after(endRange)){
-                break;
-            }
-            result.add(nextStartTime);
-            // This is required because getNextStartTime returns a time greater than or equal to the referenceTime.
-            current = new Date(nextStartTime.getTime() + ONE_MS); // 1 millisecond later
-        }
-        return result;
-    }
-
-    /**
-     * Returns Data Source Type given a feed with Import policy.
-     *
-     * @param cluster
-     * @param feed
-     * @return
-     * @throws FalconException
-     */
-
-    public static DatasourceType getImportDatasourceType(
-            Cluster cluster, Feed feed) throws FalconException {
-        return FeedHelper.getImportDatasourceType(cluster, feed);
-    }
-
-    /**
-     * Returns Data Source Type given a feed with Export policy.
-     *
-     * @param cluster
-     * @param feed
-     * @return
-     * @throws FalconException
-     */
-
-    public static DatasourceType getExportDatasourceType(
-            Cluster cluster, Feed feed) throws FalconException {
-        return FeedHelper.getExportDatasourceType(cluster, feed);
-    }
-
-    public static EntityNotification getEntityNotification(Entity entity) {
-        switch (entity.getEntityType()) {
-        case FEED:
-            Feed feed = (Feed) entity;
-            return feed.getNotification();
-        case PROCESS:
-            Process process = (Process) entity;
-            return process.getNotification();
-
-        default:
-            throw new IllegalArgumentException("Unhandled type: " + entity.getEntityType());
-        }
-    }
-
-
-    /**
-     * @param properties - String of format key1:value1, key2:value2
-     * @return
-     */
-    public static Map<String, String> getPropertyMap(String properties) {
-        Map<String, String> props = new HashMap<>();
-        if (StringUtils.isNotEmpty(properties)) {
-            String[] kvPairs = properties.split(",");
-            for (String kvPair : kvPairs) {
-                String[] keyValue = kvPair.trim().split(":", 2);
-                if (keyValue.length == 2 && !keyValue[0].trim().isEmpty() && !keyValue[1].trim().isEmpty()) {
-                    props.put(keyValue[0].trim(), keyValue[1].trim());
-                } else {
-                    throw new IllegalArgumentException("Found invalid property " + keyValue[0]
-                            + ". Schedule properties must be comma separated key-value pairs. "
-                            + " Example: key1:value1,key2:value2");
-                }
-            }
-        }
-        return props;
-    }
-
-    public static JOBPRIORITY getPriority(Process process) {
-        org.apache.falcon.entity.v0.process.Properties processProps = process.getProperties();
-        if (processProps != null) {
-            for (org.apache.falcon.entity.v0.process.Property prop : processProps.getProperties()) {
-                if (prop.getName().equals(MR_JOB_PRIORITY)) {
-                    return JOBPRIORITY.valueOf(prop.getValue());
-                }
-            }
-        }
-        return JOBPRIORITY.NORMAL;
-    }
-}
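
The instance-time helpers above (getNextStartTime, getInstanceTimes, getPreviousInstanceTime) all reduce to stepping a start time forward in whole periods of the entity frequency. A minimal sketch of that core loop, assuming a plain java.util.Calendar field stands in for Frequency.getTimeUnit().getCalendarUnit() and omitting the large-gap fast-forward the original performs (class name and sample dates are illustrative, not from the removed file):

import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;

/** Illustrative sketch only: the core loop behind EntityUtil.getNextStartTime. */
public class NextInstanceSketch {

    static Date nextStartTime(Date start, int every, int calendarUnit, TimeZone tz, Date reference) {
        if (start.after(reference)) {
            return start;                     // the first instance is still in the future
        }
        Calendar cal = Calendar.getInstance(tz);
        cal.setTime(start);
        while (cal.getTime().before(reference)) {
            cal.add(calendarUnit, every);     // advance one full period at a time
        }
        return cal.getTime();                 // >= reference and on an instance boundary
    }

    public static void main(String[] args) {
        TimeZone utc = TimeZone.getTimeZone("UTC");

        Calendar start = Calendar.getInstance(utc);
        start.clear();
        start.set(2014, Calendar.JANUARY, 1, 0, 0, 0);   // 2014-01-01T00:00Z

        Calendar ref = Calendar.getInstance(utc);
        ref.clear();
        ref.set(2015, Calendar.JANUARY, 1, 4, 0, 0);     // 2015-01-01T04:00Z

        // For a days(1) entity, the next instance on or after the reference is 2015-01-02T00:00Z.
        System.out.println(nextStartTime(start.getTime(), 1, Calendar.DAY_OF_MONTH, utc, ref.getTime()));
    }
}

For the same days(1) example, getPreviousInstanceTime with the reference 2015-01-01T04:00Z returns 2015-01-01T00:00Z, consistent with the javadoc above.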

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/common/src/main/java/org/apache/falcon/entity/ExternalId.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/ExternalId.java b/common/src/main/java/org/apache/falcon/entity/ExternalId.java
deleted file mode 100644
index 688d5a6..0000000
--- a/common/src/main/java/org/apache/falcon/entity/ExternalId.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.entity;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.Tag;
-import org.apache.falcon.entity.v0.SchemaHelper;
-
-import java.util.Date;
-
-/**
- * External id as represented by workflow engine.
- */
-public class ExternalId {
-    private static final String SEPARATOR = "/";
-    private String id;
-
-    public ExternalId(String id) {
-        this.id = id;
-    }
-
-    public String getId() {
-        return id;
-    }
-
-    public ExternalId(String name, Tag tag, String elexpr) {
-        if (StringUtils.isEmpty(name) || tag == null || StringUtils.isEmpty(elexpr)) {
-            throw new IllegalArgumentException("Empty inputs!");
-        }
-
-        id = name + SEPARATOR + tag.name() + SEPARATOR + elexpr;
-    }
-
-    public ExternalId(String name, Tag tag, Date date) {
-        this(name, tag, SchemaHelper.formatDateUTC(date));
-    }
-
-    public String getName() {
-        String[] parts = id.split(SEPARATOR);
-        return parts[0];
-    }
-
-    public Date getDate() throws FalconException {
-        return EntityUtil.parseDateUTC(getDateAsString());
-    }
-
-    public String getDateAsString() {
-        String[] parts = id.split(SEPARATOR);
-        return parts[2];
-    }
-
-    public Tag getTag() {
-        String[] parts = id.split(SEPARATOR);
-        return Tag.valueOf(parts[1]);
-    }
-
-    public String getDFSname() {
-        return id.replace(":", "-");
-    }
-}
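
For reference, the external id above is a "/"-separated string of entity name, tag and UTC date. A small illustrative sketch (the entity name and date below are made up) of how such an id round-trips and how getDFSname makes it safe for HDFS path names by replacing colons:

/** Illustrative sketch only: the "<name>/<TAG>/<date>" layout used by ExternalId. */
public class ExternalIdSketch {

    public static void main(String[] args) {
        String name = "sample-process";            // hypothetical entity name
        String tag = "DEFAULT";
        String dateUTC = "2016-03-01T09:25Z";      // UTC date in SchemaHelper style

        String id = name + "/" + tag + "/" + dateUTC;
        System.out.println(id);                    // sample-process/DEFAULT/2016-03-01T09:25Z

        String[] parts = id.split("/");
        System.out.println(parts[0]);              // name -> sample-process
        System.out.println(parts[2]);              // date -> 2016-03-01T09:25Z

        // DFS-safe form: colons are replaced, as in getDFSname()
        System.out.println(id.replace(":", "-"));  // sample-process/DEFAULT/2016-03-01T09-25Z
    }
}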


[02/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceStatusTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceStatusTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceStatusTest.java
deleted file mode 100644
index 14ecfe4..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceStatusTest.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.APIResult;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesResult.WorkflowStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction.Status;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Process instance status tests.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceStatusTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private String feedInputTimedOutPath = baseTestHDFSDir + "/timedoutStatus"
-        + MINUTE_DATE_PATTERN;
-    private String feedOutputTimedOutPath =
-        baseTestHDFSDir + "/output-data/timedoutStatus" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(ProcessInstanceStatusTest.class);
-    private static final double TIMEOUT = 15;
-    private String processName;
-    private OozieClient clusterOC = serverOC.get(0);
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    /**
-     * Configures a general process definition whose particular properties can be overwritten.
-     */
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:22Z");
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        processName = bundles[0].getProcessName();
-        HadoopUtil.deleteDirIfExists(baseTestHDFSDir + "/input", clusterFS);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Timeout is set to 3 minutes. getStatus is performed for a large range in the past.
-     * 6 instances should be materialized: one running and the others waiting.
-     * Also checks logging information, added as part of FALCON-813.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusStartAndEndCheckNoInstanceAfterEndDate()
-        throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-03T10:22Z");
-        bundles[0].setProcessTimeOut(3, TimeUnit.minutes);
-        bundles[0].setProcessPeriodicity(1, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1, Status.RUNNING, EntityType.PROCESS);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T10:20Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-        InstanceUtil.validateResponse(r, 6, 1, 0, 5, 0);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Perform -getStatus using only -start parameter within time-range of non-materialized
-     * instances. There should be no instances returned in response.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusOnlyStartAfterMat() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-03T10:22Z");
-        bundles[0].setProcessTimeOut(3, TimeUnit.minutes);
-        bundles[0].setProcessPeriodicity(1, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T05:00Z");
-        AssertUtil.assertSucceeded(r);
-        Assert.assertEquals(r.getInstances(), null);
-    }
-
-    /**
-     * Schedule process. Perform -getStatus using -end parameter which is out of process
-     * validity range. Attempt should succeed with end defaulted to entity end.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusEndOutOfRange() throws Exception {
-        HadoopUtil.deleteDirIfExists(baseTestHDFSDir + "/input", clusterFS);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-                "?start=2010-01-02T01:00Z&end=2010-01-02T01:30Z");
-        InstanceUtil.validateResponse(r, 5, 0, 0, 5, 0);
-    }
-
-    /**
-     * Schedule process and try to -getStatus without date parameters. Attempt should succeed: start defaults
-     * to the entity start and end defaults to the entity end.
-     * Also checks the logging information that is part of the status output.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusDateEmpty()
-        throws JAXBException, AuthenticationException, IOException, URISyntaxException,
-        OozieClientException, InterruptedException {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:30Z");
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(0), processName, 5,
-            Status.RUNNING, EntityType.PROCESS);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName, null);
-        InstanceUtil.validateResponse(r, 6, 5, 0, 1, 0);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Schedule process with a number of instances. Perform -getStatus request with valid
-     * parameters. Attempt should succeed.
-     * Also checks the logging information that is part of the status output.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusStartAndEnd() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(0), processName, 1 ,
-               Status.RUNNING, EntityType.PROCESS);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:20Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Schedule process. Perform -getStatus using -start parameter which is out of process
-     * validity range. Attempt should succeed, with start defaulted to entity start time.
-     * Also checks the logging information that is part of the status output.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusStartOutOfRange() throws Exception {
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            Status.RUNNING, EntityType.PROCESS, 5);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T00:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Schedule and then delete process. Try to get the status of its instances. Attempt should
-     * fail with an appropriate code.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusKilled() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().delete(bundles[0].getProcessData()));
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:20Z");
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * Schedule process and then suspend it. Perform -getStatus of the first instance using only the -start parameter.
-     * The instance should be suspended.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusOnlyStartSuspended() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1, Status.RUNNING, EntityType.PROCESS);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().suspend(bundles[0].getProcessData()));
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z");
-        Assert.assertEquals(r.getStatus(), APIResult.Status.SUCCEEDED);
-        Assert.assertEquals(InstanceUtil.instancesInResultWithStatus(r, WorkflowStatus.SUSPENDED), 1);
-    }
-
-    /**
-     * Schedule process. Try to -getStatus using -start/-end parameters with reversed values,
-     * i.e. -start is later than -end. Attempt should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusReverseDateRange() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 0);
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(0), processName, 1,
-            Status.RUNNING, EntityType.PROCESS);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:20Z&end=2010-01-02T01:07Z");
-        InstanceUtil.validateError(r, ResponseErrors.START_BEFORE_SCHEDULED);
-    }
-
-    /**
-     * Schedule process. Perform -getStatus using -start/-end parameters which are out of process
-     * validity range. Attempt should succeed, with start/end defaulted to entity's start/end.
-     * Also checks the logging information that is part of the status output.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusStartEndOutOfRange() throws Exception {
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(2);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 2,
-            Status.RUNNING, EntityType.PROCESS, 5);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T00:00Z&end=2010-01-02T01:30Z");
-        InstanceUtil.validateResponse(r, 5, 2, 0, 3, 0);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Schedule process. Suspend and then resume it. -getStatus of its instances. Check that
-     * response reflects that instances are running.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusResumed() throws Exception {
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String process = bundles[0].getProcessData();
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            Status.RUNNING, EntityType.PROCESS, 5);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().suspend(process));
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.SUSPENDED);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5, Status.SUSPENDED, EntityType.PROCESS, 3);
-        TimeUtil.sleepSeconds(TIMEOUT);
-        AssertUtil.assertSucceeded(prism.getProcessHelper().resume(process));
-        TimeUtil.sleepSeconds(TIMEOUT);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            Status.RUNNING, EntityType.PROCESS, 5);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:22Z");
-        InstanceUtil.validateSuccess(r, bundles[0], WorkflowStatus.RUNNING);
-    }
-
-    /**
-     * Schedule process. Perform -getStatus of its first instance using only the -start parameter, which
-     * points to the start time of the process validity. Check that the response reflects the expected
-     * status of the instance.
-     * Also checks the logging information that is part of the status output.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusOnlyStart() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 1,
-            Status.RUNNING, EntityType.PROCESS, 5);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z");
-        InstanceUtil.validateResponse(r, 5, 1, 0, 4, 0);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Schedule process. Try to perform -getStatus using valid -start parameter but invalid
-     * process name. Attempt should fail with an appropriate status code.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusInvalidName() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T02:30Z");
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstancesResult r = prism.getProcessHelper()
-            .getProcessInstanceStatus("invalidProcess", "?start=2010-01-01T01:00Z");
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * Schedule process. Try to -getStatus without time-range parameters. Attempt should succeed.
-     * Also checks the logging information that is part of the status output.
-     * If the status does not contain the jobId of an instance, the test should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusWoParams() throws Exception {
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:23Z");
-        bundles[0].submitFeedsScheduleProcess(prism);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, bundles[0].getProcessName(), EntityType.PROCESS);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            Status.RUNNING, EntityType.PROCESS, 5);
-        List<String> oozieWfIDs = OozieUtil.getWorkflow(clusterOC, bundleId);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName, null);
-        InstanceUtil.validateResponse(r, 5, 5, 0, 0, 0);
-        List<String> instanceWfIDs = InstanceUtil.getWorkflowJobIds(r);
-        Assert.assertTrue(matchWorkflows(instanceWfIDs, oozieWfIDs), "No job ids exposed in status message");
-    }
-
-    /**
-     * Schedule process with timeout set to 2 minutes. Wait till it becomes timed out. Perform -getStatus
-     * on that process. Check that all materialized instances have failed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceStatusTimedOut() throws Exception {
-        bundles[0].setInputFeedDataPath(feedInputTimedOutPath);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:11Z");
-        bundles[0].setProcessTimeOut(2, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(feedOutputTimedOutPath);
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3, Status.TIMEDOUT,
-            EntityType.PROCESS);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:11Z");
-        InstanceUtil.validateFailedInstances(r, 3);
-    }
-
-    /**
-     * Check that the default value of the end-time parameter is the current time.
-     */
-    @Test
-    public void testDefaultEndTimeParam()
-        throws OozieClientException, IOException, InterruptedException, AuthenticationException, URISyntaxException,
-        JAXBException {
-        //set validity to have 12 instances
-        String start = TimeUtil.getTimeWrtSystemTime(-60);
-        String end = TimeUtil.getTimeWrtSystemTime(0);
-        bundles[0].setProcessValidity(start, end);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        //make first 3 instances running
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 1);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 2);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3, Status.RUNNING,
-            EntityType.PROCESS);
-        //check instances status with end, expected first 10 instances
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=" + start + "&end=" + TimeUtil.addMinsToTime(end, -11));
-        InstanceUtil.validateResponse(r, 10, 3, 0, 7, 0);
-        //request the same but without end, expected to have the latest 10 instances
-        r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=" + start);
-        InstanceUtil.validateResponse(r, 10, 1, 0, 9, 0);
-        //the same with numResults which includes/excludes all running instances
-        r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=" + start + "&end=" + TimeUtil.addMinsToTime(end, -16) + "&numResults=9");
-        InstanceUtil.validateResponse(r, 9, 3, 0, 6, 0);
-        //expected end is set to now, thus getting last 9 instances
-        r = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=" + start + "&numResults=9");
-        InstanceUtil.validateResponse(r, 9, 0, 0, 9, 0);
-    }
-
-    /*
-     * Matches the workflow ids obtained from the instance status against those obtained from Oozie.
-     */
-    private boolean matchWorkflows(List<String> instanceWf, List<String> oozieWf) {
-        Collections.sort(instanceWf);
-        Collections.sort(oozieWf);
-        if (instanceWf.size() != oozieWf.size()) {
-            return false;
-        }
-        for (int index = 0; index < instanceWf.size(); index++) {
-            if (!instanceWf.get(index).contains(oozieWf.get(index))) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-}
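
The status tests above assert that every workflow id exposed in the instance-status response matches one of the workflow ids reported by Oozie, using the matchWorkflows helper. A standalone sketch of that sorted, positional comparison follows; the class name and sample ids are hypothetical, and in the tests the real lists come from InstanceUtil.getWorkflowJobIds() and OozieUtil.getWorkflow(). Judging from the expected counts asserted above, InstanceUtil.validateResponse appears to take its arguments in the order (result, total, running, suspended, waiting, killed).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/** Standalone sketch of the workflow-id matching used by matchWorkflows in the deleted test. */
public class WorkflowMatchSketch {

    /** Both lists must have the same size, and after sorting each status entry must contain
     *  the Oozie workflow id at the same position. */
    static boolean matchWorkflows(List<String> instanceWf, List<String> oozieWf) {
        Collections.sort(instanceWf);
        Collections.sort(oozieWf);
        if (instanceWf.size() != oozieWf.size()) {
            return false;
        }
        for (int i = 0; i < instanceWf.size(); i++) {
            if (!instanceWf.get(i).contains(oozieWf.get(i))) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical ids; order does not matter because both lists are sorted first.
        List<String> fromStatus = new ArrayList<>(Arrays.asList("job: 0000002-W", "job: 0000001-W"));
        List<String> fromOozie  = new ArrayList<>(Arrays.asList("0000001-W", "0000002-W"));
        System.out.println(matchWorkflows(fromStatus, fromOozie)); // true
    }
}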

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceSuspendTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceSuspendTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceSuspendTest.java
deleted file mode 100644
index 4a27a0a..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessInstanceSuspendTest.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.enumsAndConstants.ResponseErrors;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-
-
-/**
- * Process instance suspend tests.
- */
-@Test(groups = "embedded")
-public class ProcessInstanceSuspendTest extends BaseTestClass {
-
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private String processName;
-    private OozieClient clusterOC = serverOC.get(0);
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setInputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-        processName = bundles[0].getProcessName();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        removeTestClassEntities();
-        HadoopUtil.deleteDirIfExists(baseTestHDFSDir, clusterFS);
-    }
-
-    /**
-     * Schedule process. Try to suspend instances with start/end parameters which are
-     * wider than the process validity range. Attempt should succeed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendLargeRange() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:23Z");
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(result, 5, 5, 0, 0, 0);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T00:00Z&end=2010-01-02T01:30Z");
-        result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T00:00Z&end=2010-01-02T01:30Z");
-        InstanceUtil.validateResponse(result, 5, 0, 5, 0, 0);
-    }
-
-    /**
-     * Schedule single-instance process. Wait till the instance succeeds. Try to suspend the
-     * succeeded instance. The action should be performed successfully, as it is idempotent.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendSucceeded() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:01Z");
-        AssertUtil.assertSucceeded(r);
-    }
-
-    /**
-     * Schedule process. Check that all instances are running. Suspend them. Check that all are
-     * suspended. In every action valid time range is used.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendAll() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:23Z");
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(result, 5, 5, 0, 0, 0);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(result, 5, 0, 5, 0, 0);
-    }
-
-    /**
-     * Schedule process and try to perform -suspend action without date-range parameters.
-     * Attempt should fail because of https://issues.apache.org/jira/browse/FALCON-710.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendWoParams() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:22Z");
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceSuspend(processName, null);
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process with 3 running and 2 waiting instances expected. Suspend the ones which are
-     * running. Check that 3 are now suspended and 2 are still waiting.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendStartAndEnd() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:23Z");
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 1);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0, 2);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:22Z");
-        InstanceUtil.validateResponse(result, 5, 3, 0, 2, 0);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:15Z");
-        result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:22Z");
-        InstanceUtil.validateResponse(result, 5, 0, 3, 2, 0);
-    }
-
-    /**
-     * Try to suspend a process which wasn't submitted or scheduled. Action should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendNonExistent() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:23Z");
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstancesResult r = prism.getProcessHelper()
-            .getProcessInstanceSuspend("invalidName", "?start=2010-01-02T01:20Z");
-        InstanceUtil.validateError(r, ResponseErrors.PROCESS_NOT_FOUND);
-    }
-
-    /**
-     * Schedule process. Perform -suspend action using only the -start parameter, which points to the
-     * start time of the process. Attempt should fail with an unparseable date error.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendOnlyStart() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:11Z");
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceSuspend(processName,
-            "?start=2010-01-02T01:00Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process. Perform -suspend action using only the -end parameter.
-     * Attempt should fail with an appropriate status message.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendOnlyEnd() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:11Z");
-        bundles[0].setProcessConcurrency(3);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 3,
-                CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult r = prism.getProcessHelper().getProcessInstanceSuspend(processName,
-                "?end=2010-01-02T01:05Z");
-        InstanceUtil.validateError(r, ResponseErrors.UNPARSEABLE_DATE);
-    }
-
-    /**
-     * Schedule process with a number of instances running. Perform -suspend action using params
-     * such that they aim to suspend the last instance. Check that only
-     * the last instance is suspended.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void testProcessInstanceSuspendSuspendLast() throws Exception {
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:23Z");
-        bundles[0].setProcessConcurrency(5);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, processName, 5,
-            CoordinatorAction.Status.RUNNING, EntityType.PROCESS, 5);
-        InstancesResult result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(result, 5, 5, 0, 0, 0);
-        prism.getProcessHelper().getProcessInstanceSuspend(processName,
-                "?start=2010-01-02T01:20Z&end=2010-01-02T01:23Z");
-        result = prism.getProcessHelper().getProcessInstanceStatus(processName,
-            "?start=2010-01-02T01:00Z&end=2010-01-02T01:21Z");
-        InstanceUtil.validateResponse(result, 5, 4, 1, 0, 0);
-    }
-}
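
Every suspend and status call above passes a time window of the form "?start=...&end=..." with minute-granularity UTC timestamps. A small self-contained sketch of assembling such a window is shown below; in the tests themselves the strings are hard-coded or produced by TimeUtil, and the class and method names here are illustrative only.

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

/** Sketch of building the "?start=...&end=..." window used by the suspend/status calls above. */
public class InstanceWindowSketch {

    /** Formats a Date in the yyyy-MM-dd'T'HH:mm'Z' style seen throughout these tests. */
    static String toUtcMinute(Date date) {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'");
        fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
        return fmt.format(date);
    }

    /** Assembles the query string handed to getProcessInstanceStatus/getProcessInstanceSuspend. */
    static String window(Date start, Date end) {
        return "?start=" + toUtcMinute(start) + "&end=" + toUtcMinute(end);
    }

    public static void main(String[] args) {
        Date now = new Date();
        Date tenMinutesAgo = new Date(now.getTime() - 10 * 60 * 1000L);
        System.out.println(window(tenMinutesAgo, now)); // e.g. ?start=2016-03-01T09:15Z&end=2016-03-01T09:25Z
    }
}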

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLateRerunTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLateRerunTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLateRerunTest.java
deleted file mode 100644
index 6a12fc8..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLateRerunTest.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.process.Input;
-import org.apache.falcon.entity.v0.process.LateInput;
-import org.apache.falcon.entity.v0.process.LateProcess;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.TestNGException;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * Process late data test.
- */
-@Test(groups = "embedded")
-public class ProcessLateRerunTest extends BaseTestClass {
-    private ColoHelper cluster1 = servers.get(0);
-    private OozieClient cluster1OC = serverOC.get(0);
-    private FileSystem cluster1FS = serverFS.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(ProcessLateRerunTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        Bundle bundle = BundleUtil.readLateDataBundle();
-        bundles[0] = new Bundle(bundle, servers.get(0));
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Test demonstrates rerunning a process on late arrival of data.
-     * Initially there is no input data and empty folders are processed.
-     * It checks the number of rerun attempts once late data has been added,
-     * ensuring that a late rerun happened.
-     */
-    @Test(enabled = true)
-    public void testProcessLateRerunOnEmptyFolder() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 30);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(10, Frequency.TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(10, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(2);
-
-        String inputName = bundles[0].getProcessObject().getFirstInputName();
-        bundles[0].setProcessLatePolicy(getLateData(2, "minutes", "periodic", inputName, aggregateWorkflowDir));
-
-        bundles[0].submitAndScheduleProcess();
-        AssertUtil.checkStatus(cluster1OC, EntityType.PROCESS, bundles[0], Job.Status.RUNNING);
-        TimeUtil.sleepSeconds(10);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, bundles[0].getProcessData(), 0);
-
-        getAndCreateDependencies(cluster1, bundles[0], cluster1OC, cluster1FS, false, 1);
-
-        int sleepMins = 6;
-        for(int i=0; i < sleepMins; i++) {
-            LOGGER.info("Waiting...");
-            TimeUtil.sleepSeconds(60);
-        }
-        InstanceUtil.waitTillInstanceReachState(cluster1OC, bundles[0].getProcessName(), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        List<String> bundleList =  OozieUtil.getBundles(cluster1.getFeedHelper().getOozieClient(),
-            bundles[0].getProcessName(), EntityType.PROCESS);
-        String bundleID = bundleList.get(0);
-
-        OozieUtil.validateRetryAttempts(cluster1OC, bundleID, EntityType.PROCESS, 1);
-    }
-
-    /**
-     * Test demonstrates rerunning a process on late arrival of data.
-     * Initially there is some data which is processed. It checks the number of rerun attempts
-     * once more data has been added, ensuring that a late rerun happened.
-     */
-    @Test(enabled = true)
-    public void testProcessLateRerunWithData() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 30);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(5, Frequency.TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(2);
-
-        String inputName = bundles[0].getProcessObject().getFirstInputName();
-
-        bundles[0].setProcessLatePolicy(getLateData(4, "minutes", "periodic", inputName, aggregateWorkflowDir));
-        bundles[0].submitAndScheduleProcess();
-        AssertUtil.checkStatus(cluster1OC, EntityType.PROCESS, bundles[0], Job.Status.RUNNING);
-        TimeUtil.sleepSeconds(10);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, bundles[0].getProcessData(), 0);
-
-        getAndCreateDependencies(cluster1, bundles[0], cluster1OC, cluster1FS, true, 1);
-
-        int sleepMins = 6;
-        for(int i=0; i < sleepMins; i++) {
-            LOGGER.info("Waiting...");
-            TimeUtil.sleepSeconds(60);
-        }
-        InstanceUtil.waitTillInstanceReachState(cluster1OC, bundles[0].getProcessName(), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        List<String> bundleList =  OozieUtil.getBundles(cluster1.getFeedHelper().getOozieClient(),
-            bundles[0].getProcessName(), EntityType.PROCESS);
-        String bundleID = bundleList.get(0);
-
-        OozieUtil.validateRetryAttempts(cluster1OC, bundleID, EntityType.PROCESS, 1);
-    }
-
-    /**
-     * Test demonstrates rerunning a process on late arrival of data for multiple input folders.
-     * It checks the number of rerun attempts once more data has been added, ensuring that a late rerun happened.
-     */
-    @Test(enabled = true)
-    public void testProcessLateRerunWithMultipleFolders() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 30);
-        String startInstance = "now(0,-5)";
-        String endInstance = "now(0,0)";
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(10, Frequency.TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(10, Frequency.TimeUnit.minutes);
-        String inputName = bundles[0].getProcessObject().getFirstInputName();
-
-        bundles[0].setProcessLatePolicy(getLateData(4, "minutes", "periodic", inputName, aggregateWorkflowDir));
-        bundles[0].setProcessConcurrency(2);
-
-        // Increase the window of input for process
-        bundles[0].setDatasetInstances(startInstance, endInstance);
-        bundles[0].submitAndScheduleProcess();
-
-        AssertUtil.checkStatus(cluster1OC, EntityType.PROCESS, bundles[0], Job.Status.RUNNING);
-        TimeUtil.sleepSeconds(10);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, bundles[0].getProcessData(), 0);
-
-        getAndCreateDependencies(cluster1, bundles[0], cluster1OC, cluster1FS, false, 3);
-
-        int sleepMins = 6;
-        for(int i=0; i < sleepMins; i++) {
-            LOGGER.info("Waiting...");
-            TimeUtil.sleepSeconds(60);
-        }
-        InstanceUtil.waitTillInstanceReachState(cluster1OC, bundles[0].getProcessName(), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        List<String> bundleList =  OozieUtil.getBundles(cluster1.getFeedHelper().getOozieClient(),
-            bundles[0].getProcessName(), EntityType.PROCESS);
-        String bundleID = bundleList.get(0);
-
-        OozieUtil.validateRetryAttempts(cluster1OC, bundleID, EntityType.PROCESS, 1);
-    }
-
-    /**
-     * Test demonstrates rerunning a process on late arrival of data for gate folders.
-     * Late rerun does not work on the gate folder, so there is no retry attempt on the appended data.
-     */
-    @Test(enabled = true)
-    public void testProcessLateRerunWithGate() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 30);
-        String startInstance = "now(0,-5)";
-        String endInstance = "now(0,0)";
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(10, Frequency.TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(10, Frequency.TimeUnit.minutes);
-        bundles[0].setProcessConcurrency(2);
-
-        // Increase the window of input for process
-        bundles[0].setDatasetInstances(startInstance, endInstance);
-
-        ProcessMerlin process = bundles[0].getProcessObject();
-        String inputName = process.getFirstInputName();
-        Input tempFeed = process.getInputs().getInputs().get(0);
-
-        Input gateInput = new Input();
-        gateInput.setName("Gate");
-        gateInput.setFeed(tempFeed.getFeed());
-        gateInput.setEnd("now(0,1)");
-        gateInput.setStart("now(0,1)");
-        process.getInputs().getInputs().add(gateInput);
-        bundles[0].setProcessData(process.toString());
-
-        bundles[0].setProcessLatePolicy(getLateData(4, "minutes", "periodic", inputName, aggregateWorkflowDir));
-
-        bundles[0].submitAndScheduleProcess();
-        AssertUtil.checkStatus(cluster1OC, EntityType.PROCESS, bundles[0], Job.Status.RUNNING);
-
-        TimeUtil.sleepSeconds(10);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, bundles[0].getProcessData(), 0);
-
-        getAndCreateDependencies(cluster1, bundles[0], cluster1OC, cluster1FS, false, 7);
-
-        int sleepMins = 6;
-        for(int i=0; i < sleepMins; i++) {
-            LOGGER.info("Waiting...");
-            TimeUtil.sleepSeconds(60);
-        }
-
-        InstanceUtil.waitTillInstanceReachState(cluster1OC, bundles[0].getProcessName(), 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        List<String> bundleList =  OozieUtil.getBundles(cluster1.getFeedHelper().getOozieClient(),
-            bundles[0].getProcessName(), EntityType.PROCESS);
-        String bundleID = bundleList.get(0);
-
-        OozieUtil.validateRetryAttempts(cluster1OC, bundleID, EntityType.PROCESS, 0);
-    }
-
-    /*
-     * dataFlag   - whether the process should initially run on empty folders or on folders containing data.
-     * dataFolder - index of the missing-dependency folder to which late data is uploaded for the rerun.
-     */
-    private void getAndCreateDependencies(ColoHelper prismHelper, Bundle bundle,
-                                          OozieClient oozieClient, FileSystem clusterFS,
-                                          boolean dataFlag, int dataFolder) {
-        try {
-            List<String> bundles = null;
-            for (int i = 0; i < 10; ++i) {
-                bundles = OozieUtil.getBundles(prismHelper.getFeedHelper().getOozieClient(),
-                    bundle.getProcessName(), EntityType.PROCESS);
-                if (bundles.size() > 0) {
-                    break;
-                }
-                TimeUtil.sleepSeconds(30);
-            }
-            Assert.assertTrue(bundles != null && bundles.size() > 0, "Bundle job not created.");
-            String bundleID = bundles.get(0);
-            LOGGER.info("bundle id: " + bundleID);
-            List<String> missingDependencies = OozieUtil.getMissingDependencies(oozieClient, bundleID);
-            for (int i = 0; i < 10 && missingDependencies == null; ++i) {
-                TimeUtil.sleepSeconds(30);
-                missingDependencies = OozieUtil.getMissingDependencies(oozieClient, bundleID);
-            }
-            Assert.assertNotNull(missingDependencies, "Missing dependencies not found.");
-
-            //print missing dependencies
-            for (String dependency : missingDependencies) {
-                LOGGER.info("dependency from job: " + dependency);
-            }
-
-            //create missing dependencies
-            LOGGER.info("Creating missing dependencies...");
-            OozieUtil.createMissingDependencies(prismHelper, EntityType.PROCESS, bundle.getProcessName(), 0, 0);
-
-            //Adding data to empty folders depending on dataFlag
-            if (dataFlag) {
-                int tempCount = 1;
-                for (String location : missingDependencies) {
-                    if (tempCount==1) {
-                        LOGGER.info("Transferring data to : " + location);
-                        HadoopUtil.copyDataToFolder(clusterFS, location,
-                            OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-                        tempCount++;
-                    }
-                }
-            }
-
-            //Process succeeding on empty folders
-            LOGGER.info("Waiting for process to succeed...");
-            InstanceUtil.waitTillInstanceReachState(oozieClient, bundle.getProcessName(), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-            TimeUtil.sleepSeconds(30);
-
-            //Adding data to check late rerun
-            int tempCounter = 1;
-            for (String dependency : missingDependencies) {
-                if (tempCounter==dataFolder) {
-                    LOGGER.info("Transferring late data to : " + dependency);
-                    HadoopUtil.copyDataToFolder(clusterFS, dependency,
-                        OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.properties"));
-                }
-                tempCounter++;
-            }
-
-        } catch (Exception e) {
-            e.printStackTrace();
-            throw new TestNGException(e);
-        }
-    }
-
-    private static LateProcess getLateData(int delay, String delayUnits, String retryType,
-                                           String inputData, String workflowDir) {
-        LateInput lateInput = new LateInput();
-        lateInput.setInput(inputData);
-        lateInput.setWorkflowPath(workflowDir);
-        LateProcess lateProcess = new LateProcess();
-        lateProcess.setDelay(new Frequency(delayUnits + "(" + delay + ")"));
-        lateProcess.setPolicy(PolicyType.fromValue(retryType));
-        lateProcess.getLateInputs().add(lateInput);
-        return lateProcess;
-    }
-}
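
The late-rerun tests above attach a LateProcess built by the getLateData helper, expressing the delay as a Frequency string such as "minutes(4)" and the policy via PolicyType.fromValue. A hedged usage sketch mirroring that helper is shown below, reusing only the Falcon entity classes already imported by the deleted test; the class name is hypothetical and the values in the comments are examples.

import org.apache.falcon.entity.v0.Frequency;
import org.apache.falcon.entity.v0.process.LateInput;
import org.apache.falcon.entity.v0.process.LateProcess;
import org.apache.falcon.entity.v0.process.PolicyType;

/** Sketch of assembling a late-data policy, mirroring getLateData() in the deleted test. */
public class LatePolicySketch {

    static LateProcess lateRerunPolicy(String inputName, String workflowDir) {
        LateInput lateInput = new LateInput();
        lateInput.setInput(inputName);           // name of the process input the policy applies to
        lateInput.setWorkflowPath(workflowDir);  // workflow to run when late data arrives

        LateProcess lateProcess = new LateProcess();
        lateProcess.setDelay(new Frequency("minutes(4)"));       // how long to keep watching for late data
        lateProcess.setPolicy(PolicyType.fromValue("periodic")); // e.g. "periodic", as used in these tests
        lateProcess.getLateInputs().add(lateInput);
        return lateProcess;
    }
}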

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathLoadTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathLoadTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathLoadTest.java
deleted file mode 100644
index 8422796..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathLoadTest.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.Job.Status;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.FileOutputStream;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Tests with the process lib folder co-located with workflow.xml.
- */
-@Test(groups = "embedded")
-public class ProcessLibPathLoadTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private String testDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = testDir + "/aggregator";
-    private static final Logger LOGGER = Logger.getLogger(ProcessLibPathLoadTest.class);
-
-    private String oozieLib = MerlinConstants.OOZIE_EXAMPLE_LIB;
-    private String oozieLibName = oozieLib.substring(oozieLib.lastIndexOf('/') + 1);
-    private String filename = OSUtil.concat(OSUtil.OOZIE_LIB_FOLDER, "lib", oozieLibName);
-    private String processName;
-    private String process;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        FileUtils.forceMkdir(new File(OSUtil.concat(OSUtil.OOZIE_LIB_FOLDER, "lib")));
-        saveUrlToFile(oozieLib);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.OOZIE_LIB_FOLDER);
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(testDir + MINUTE_DATE_PATTERN);
-        bundles[0].setProcessValidity("2015-01-02T01:00Z", "2015-01-02T01:04Z");
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(testDir + "/output-data" + MINUTE_DATE_PATTERN);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].setProcessLibPath(aggregateWorkflowDir + "/lib");
-        process = bundles[0].getProcessData();
-        processName = Util.readEntityName(process);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    @AfterClass(alwaysRun = true)
-    public void deleteJar() throws IOException {
-        File file = new File(filename);
-        Assert.assertEquals(file.delete(), true, filename + " is not present.");
-        FileUtils.deleteDirectory(new File(OSUtil.concat(OSUtil.OOZIE_LIB_FOLDER, "lib")));
-    }
-
-    /**
-     * Tests a process with a jar in the lib location.
-     * Schedule the process; it should succeed.
-     *
-     * @throws Exception
-     */
-    @Test
-    public void setRightJarInWorkflowLib() throws Exception {
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, process, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        OozieUtil.waitForBundleToReachState(clusterOC, processName, Status.SUCCEEDED);
-    }
-
-    /**
-     * Tests a process with no jar in the lib location.
-     * Schedule the process; it should get killed.
-     *
-     * @throws Exception
-     */
-    @Test
-    public void setNoJarInWorkflowLibLocation() throws Exception {
-        HadoopUtil.deleteDirIfExists(aggregateWorkflowDir + "/lib/" + oozieLibName, clusterFS);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, process, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        OozieUtil.waitForBundleToReachState(clusterOC, processName, Status.KILLED);
-    }
-
-    /**
-     * Downloads a jar from a remote public location and saves it to the local {@code filename} path.
-     * @param urlString public URL from which the jar is downloaded
-     * @throws IOException if the download or the local write fails
-     */
-    private void saveUrlToFile(String urlString)
-        throws IOException {
-
-        URL url = new URL(urlString);
-        String link;
-        HttpURLConnection http = (HttpURLConnection) url.openConnection();
-        Map<String, List<String>> header = http.getHeaderFields();
-        while (isRedirected(header)) {
-            link = header.get("Location").get(0);
-            url = new URL(link);
-            http = (HttpURLConnection) url.openConnection();
-            header = http.getHeaderFields();
-        }
-
-        InputStream input = http.getInputStream();
-        byte[] buffer = new byte[4096];
-        int n;
-        OutputStream output = new FileOutputStream(new File(filename));
-        while ((n = input.read(buffer)) != -1) {
-            output.write(buffer, 0, n);
-        }
-        output.close();
-    }
-
-    private static boolean isRedirected(Map<String, List<String>> header) {
-        for (String hv : header.get(null)) {
-            if (hv.contains(" 301 ") || hv.contains(" 302 ")) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathTest.java
deleted file mode 100644
index 4196d99..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessLibPathTest.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.Job.Status;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * Tests with process lib folder detached from workflow.xml.
- */
-@Test(groups = "embedded")
-public class ProcessLibPathTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private String testDir = cleanAndGetTestDir();
-    private String testLibDir = testDir + "/TestLib";
-    private static final Logger LOGGER = Logger.getLogger(ProcessLibPathTest.class);
-    private String processName;
-    private String process;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        LOGGER.info("in @BeforeClass");
-        Bundle b = BundleUtil.readELBundle();
-        b.generateUniqueBundle(this);
-        b = new Bundle(b, cluster);
-        String startDate = "2010-01-01T22:00Z";
-        String endDate = "2010-01-02T03:00Z";
-        b.setInputFeedDataPath(testDir + "/input" + MINUTE_DATE_PATTERN);
-        String prefix = b.getFeedDataPathPrefix();
-        HadoopUtil.deleteDirIfExists(prefix.substring(1), clusterFS);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 20);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT, prefix, dataDates);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(testDir + MINUTE_DATE_PATTERN);
-        bundles[0].setProcessValidity("2010-01-02T01:00Z", "2010-01-02T01:04Z");
-        bundles[0].setProcessPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(5, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(testDir + "/output-data" + MINUTE_DATE_PATTERN);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].setProcessLibPath(testLibDir);
-        process = bundles[0].getProcessData();
-        processName = Util.readEntityName(process);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Tests a process with no lib folder in the workflow location.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void setDifferentLibPathWithNoLibFolderInWorkflowfLocaltion() throws Exception {
-        String workflowDir = testLibDir + "/aggregatorLib1/";
-        HadoopUtil.uploadDir(clusterFS, workflowDir, OSUtil.RESOURCES_OOZIE);
-        bundles[0].setProcessWorkflow(workflowDir);
-        LOGGER.info("processData: " + Util.prettyPrintXml(process));
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, process, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        OozieUtil.waitForBundleToReachState(clusterOC, processName, Status.SUCCEEDED);
-    }
-
-    /**
-     * Tests a process with a wrong jar in the lib folder of the workflow location.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void setDifferentLibPathWithWrongJarInWorkflowLib() throws Exception {
-        String workflowDir = testLibDir + "/aggregatorLib2/";
-        HadoopUtil.uploadDir(clusterFS, workflowDir, OSUtil.RESOURCES_OOZIE);
-        HadoopUtil.recreateDir(clusterFS, workflowDir + "/lib");
-        HadoopUtil.copyDataToFolder(clusterFS, workflowDir + "/lib/invalid.jar",
-            OSUtil.concat(OSUtil.NORMAL_INPUT, "dataFile.xml"));
-        bundles[0].setProcessWorkflow(workflowDir);
-        LOGGER.info("processData: " + Util.prettyPrintXml(process));
-        bundles[0].submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, process, 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        OozieUtil.waitForBundleToReachState(clusterOC, processName, Status.SUCCEEDED);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessSLATest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessSLATest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessSLATest.java
deleted file mode 100644
index f4c9b30..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessSLATest.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.log4j.Logger;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Process SLA tests.
- */
-@Test(groups = "embedded")
-public class ProcessSLATest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private static final Logger LOGGER = Logger.getLogger(ProcessSLATest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        String startTime = TimeUtil.getTimeWrtSystemTime(0);
-        String endTime = TimeUtil.addMinsToTime(startTime, 20);
-        LOGGER.info("Time range between : " + startTime + " and " + endTime);
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundles[0], cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].submitClusters(prism);
-        bundles[0].setInputFeedDataPath(baseTestHDFSDir + MINUTE_DATE_PATTERN);
-        bundles[0].setOutputFeedLocationData(baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN);
-        bundles[0].setOutputFeedPeriodicity(5, Frequency.TimeUnit.minutes);
-        bundles[0].submitFeeds(prism);
-        bundles[0].setProcessConcurrency(1);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessValidity(startTime, endTime);
-        bundles[0].setProcessPeriodicity(5, Frequency.TimeUnit.minutes);
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedule a process with a correctly adjusted SLA. Response should reflect success.
-     *
-     */
-    @Test
-    public void scheduleValidProcessSLA() throws Exception {
-
-        ProcessMerlin processMerlin = bundles[0].getProcessObject();
-        processMerlin.setSla(new Frequency("3", Frequency.TimeUnit.hours),
-                new Frequency("6", Frequency.TimeUnit.hours));
-        bundles[0].setProcessData(processMerlin.toString());
-        ServiceResponse response = prism.getProcessHelper().submitAndSchedule(processMerlin.toString());
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Schedule a process with equal slaStart and slaEnd values. Response should reflect success.
-     *
-     */
-    @Test
-    public void scheduleProcessWithSameSLAStartSLAEnd() throws Exception {
-
-        ProcessMerlin processMerlin = bundles[0].getProcessObject();
-        processMerlin.setSla(new Frequency("3", Frequency.TimeUnit.hours),
-                new Frequency("3", Frequency.TimeUnit.hours));
-        bundles[0].setProcessData(processMerlin.toString());
-        ServiceResponse response = prism.getProcessHelper().submitAndSchedule(processMerlin.toString());
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Schedule a process with slaEnd less than slaStart. Response should reflect failure.
-     *
-     */
-    @Test
-    public void scheduleProcessWithSLAEndLowerthanSLAStart() throws Exception {
-
-        ProcessMerlin processMerlin = bundles[0].getProcessObject();
-        processMerlin.setSla(new Frequency("4", Frequency.TimeUnit.hours),
-                new Frequency("2", Frequency.TimeUnit.hours));
-        bundles[0].setProcessData(processMerlin.toString());
-        ServiceResponse response = prism.getProcessHelper().submitAndSchedule(processMerlin.toString());
-        LOGGER.info("response : " + response.getMessage());
-
-        String message = "shouldStartIn of Process: " + processMerlin.getSla().getShouldStartIn().getTimeUnit() + "("
-                + processMerlin.getSla().getShouldStartIn().getFrequency() + ")is greater than shouldEndIn: "
-                + processMerlin.getSla().getShouldEndIn().getTimeUnit() +"("
-                + processMerlin.getSla().getShouldEndIn().getFrequency() + ")";
-        validate(response, message);
-    }
-
-    /**
-     * Schedule a process with timeout greater than slaStart. Response should reflect success.
-     *
-     */
-    @Test
-    public void scheduleProcessWithTimeoutGreaterThanSLAStart() throws Exception {
-
-        ProcessMerlin processMerlin = bundles[0].getProcessObject();
-        processMerlin.setTimeout(new Frequency("3", Frequency.TimeUnit.hours));
-        processMerlin.setSla(new Frequency("2", Frequency.TimeUnit.hours),
-                new Frequency("4", Frequency.TimeUnit.hours));
-        bundles[0].setProcessData(processMerlin.toString());
-        ServiceResponse response = prism.getProcessHelper().submitAndSchedule(processMerlin.toString());
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Schedule a process with timeout less than slaStart. Response should reflect failure.
-     *
-     */
-    @Test
-    public void scheduleProcessWithTimeoutLessThanSLAStart() throws Exception {
-
-        ProcessMerlin processMerlin = bundles[0].getProcessObject();
-        processMerlin.setTimeout(new Frequency("1", Frequency.TimeUnit.hours));
-        processMerlin.setSla(new Frequency("2", Frequency.TimeUnit.hours),
-                new Frequency("4", Frequency.TimeUnit.hours));
-        bundles[0].setProcessData(processMerlin.toString());
-        ServiceResponse response = prism.getProcessHelper().submitAndSchedule(processMerlin.toString());
-
-        String message = "shouldStartIn of Process: " + processMerlin.getSla().getShouldStartIn().getTimeUnit() + "("
-                + processMerlin.getSla().getShouldStartIn().getFrequency() + ") is greater than timeout: "
-                +processMerlin.getTimeout().getTimeUnit() +"(" + processMerlin.getTimeout().getFrequency() + ")";
-        validate(response, message);
-    }
-
-    private void validate(ServiceResponse response, String message) throws Exception {
-        AssertUtil.assertFailed(response);
-        LOGGER.info("Expected message is : " + message);
-        Assert.assertTrue(response.getMessage().contains(message),
-                "Correct response was not present in process schedule. Process response is : "
-                        + response.getMessage());
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessUpdateTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessUpdateTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessUpdateTest.java
deleted file mode 100644
index dbb45a6..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/ProcessUpdateTest.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency;
-import org.apache.falcon.entity.v0.process.LateInput;
-import org.apache.falcon.entity.v0.process.LateProcess;
-import org.apache.falcon.entity.v0.process.PolicyType;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.OozieClient;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-/**
- * Tests related to the process update feature.
- */
-@Test(groups = "embedded")
-public class ProcessUpdateTest extends BaseTestClass {
-
-    private OozieClient clusterOC = serverOC.get(0);
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String feedInputPath = baseTestHDFSDir + "/input" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/output-data" + MINUTE_DATE_PATTERN;
-    private static final Logger LOGGER = Logger.getLogger(ProcessUpdateTest.class);
-
-    @BeforeClass(alwaysRun = true)
-    public void uploadWorkflow() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        Bundle bundle = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundle, servers.get(0));
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setInputFeedDataPath(feedInputPath);
-        bundles[0].setOutputFeedLocationData(feedOutputPath);
-    }
-
-    /**
-     * Test for https://issues.apache.org/jira/browse/FALCON-99.
-     * Scenario: schedule a process which doesn't have late data handling and then update it to have it.
-     * Check that a new coordinator is created.
-     */
-    @Test
-    public void updateProcessWithLateData() throws Exception {
-        String start = TimeUtil.getTimeWrtSystemTime(-60);
-        String end = TimeUtil.getTimeWrtSystemTime(10);
-        bundles[0].submitAndScheduleAllFeeds();
-        ProcessMerlin process = bundles[0].getProcessObject();
-        process.setValidity(start, end);
-        process.setLateProcess(null);
-        prism.getProcessHelper().submitAndSchedule(process.toString());
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, process.toString(), 0);
-        String bundleId = OozieUtil.getLatestBundleID(clusterOC, process.getName(), EntityType.PROCESS);
-
-        //update process to have late data handling
-        LateProcess lateProcess = new LateProcess();
-        lateProcess.setDelay(new Frequency("hours(1)"));
-        lateProcess.setPolicy(PolicyType.EXP_BACKOFF);
-        LateInput lateInput = new LateInput();
-        lateInput.setInput("inputData");
-        lateInput.setWorkflowPath(aggregateWorkflowDir);
-        lateProcess.getLateInputs().add(lateInput);
-        process.setLateProcess(lateProcess);
-        LOGGER.info("Updated process xml: " + Util.prettyPrintXml(process.toString()));
-        AssertUtil.assertSucceeded(prism.getProcessHelper().update(process.toString(), process.toString()));
-
-        //check that new coordinator was created
-        String newBundleId = OozieUtil.getLatestBundleID(clusterOC, process.getName(), EntityType.PROCESS);
-        Assert.assertNotEquals(bundleId, newBundleId, "New Bundle should be created.");
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-}


[24/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/entypo-icons-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/entypo-icons-LICENSE.txt b/docs/license/entypo-icons-LICENSE.txt
deleted file mode 100644
index 604209a..0000000
--- a/docs/license/entypo-icons-LICENSE.txt
+++ /dev/null
@@ -1,359 +0,0 @@
-Creative Commons Legal Code
-
-Attribution-ShareAlike 3.0 Unported
-
-    CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
-    LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN
-    ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
-    INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
-    REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR
-    DAMAGES RESULTING FROM ITS USE.
-
-License
-
-THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
-COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
-COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
-AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
-
-BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
-TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
-BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
-CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
-CONDITIONS.
-
-1. Definitions
-
- a. "Adaptation" means a work based upon the Work, or upon the Work and
-    other pre-existing works, such as a translation, adaptation,
-    derivative work, arrangement of music or other alterations of a
-    literary or artistic work, or phonogram or performance and includes
-    cinematographic adaptations or any other form in which the Work may be
-    recast, transformed, or adapted including in any form recognizably
-    derived from the original, except that a work that constitutes a
-    Collection will not be considered an Adaptation for the purpose of
-    this License. For the avoidance of doubt, where the Work is a musical
-    work, performance or phonogram, the synchronization of the Work in
-    timed-relation with a moving image ("synching") will be considered an
-    Adaptation for the purpose of this License.
- b. "Collection" means a collection of literary or artistic works, such as
-    encyclopedias and anthologies, or performances, phonograms or
-    broadcasts, or other works or subject matter other than works listed
-    in Section 1(f) below, which, by reason of the selection and
-    arrangement of their contents, constitute intellectual creations, in
-    which the Work is included in its entirety in unmodified form along
-    with one or more other contributions, each constituting separate and
-    independent works in themselves, which together are assembled into a
-    collective whole. A work that constitutes a Collection will not be
-    considered an Adaptation (as defined below) for the purposes of this
-    License.
- c. "Creative Commons Compatible License" means a license that is listed
-    at https://creativecommons.org/compatiblelicenses that has been
-    approved by Creative Commons as being essentially equivalent to this
-    License, including, at a minimum, because that license: (i) contains
-    terms that have the same purpose, meaning and effect as the License
-    Elements of this License; and, (ii) explicitly permits the relicensing
-    of adaptations of works made available under that license under this
-    License or a Creative Commons jurisdiction license with the same
-    License Elements as this License.
- d. "Distribute" means to make available to the public the original and
-    copies of the Work or Adaptation, as appropriate, through sale or
-    other transfer of ownership.
- e. "License Elements" means the following high-level license attributes
-    as selected by Licensor and indicated in the title of this License:
-    Attribution, ShareAlike.
- f. "Licensor" means the individual, individuals, entity or entities that
-    offer(s) the Work under the terms of this License.
- g. "Original Author" means, in the case of a literary or artistic work,
-    the individual, individuals, entity or entities who created the Work
-    or if no individual or entity can be identified, the publisher; and in
-    addition (i) in the case of a performance the actors, singers,
-    musicians, dancers, and other persons who act, sing, deliver, declaim,
-    play in, interpret or otherwise perform literary or artistic works or
-    expressions of folklore; (ii) in the case of a phonogram the producer
-    being the person or legal entity who first fixes the sounds of a
-    performance or other sounds; and, (iii) in the case of broadcasts, the
-    organization that transmits the broadcast.
- h. "Work" means the literary and/or artistic work offered under the terms
-    of this License including without limitation any production in the
-    literary, scientific and artistic domain, whatever may be the mode or
-    form of its expression including digital form, such as a book,
-    pamphlet and other writing; a lecture, address, sermon or other work
-    of the same nature; a dramatic or dramatico-musical work; a
-    choreographic work or entertainment in dumb show; a musical
-    composition with or without words; a cinematographic work to which are
-    assimilated works expressed by a process analogous to cinematography;
-    a work of drawing, painting, architecture, sculpture, engraving or
-    lithography; a photographic work to which are assimilated works
-    expressed by a process analogous to photography; a work of applied
-    art; an illustration, map, plan, sketch or three-dimensional work
-    relative to geography, topography, architecture or science; a
-    performance; a broadcast; a phonogram; a compilation of data to the
-    extent it is protected as a copyrightable work; or a work performed by
-    a variety or circus performer to the extent it is not otherwise
-    considered a literary or artistic work.
- i. "You" means an individual or entity exercising rights under this
-    License who has not previously violated the terms of this License with
-    respect to the Work, or who has received express permission from the
-    Licensor to exercise rights under this License despite a previous
-    violation.
- j. "Publicly Perform" means to perform public recitations of the Work and
-    to communicate to the public those public recitations, by any means or
-    process, including by wire or wireless means or public digital
-    performances; to make available to the public Works in such a way that
-    members of the public may access these Works from a place and at a
-    place individually chosen by them; to perform the Work to the public
-    by any means or process and the communication to the public of the
-    performances of the Work, including by public digital performance; to
-    broadcast and rebroadcast the Work by any means including signs,
-    sounds or images.
- k. "Reproduce" means to make copies of the Work by any means including
-    without limitation by sound or visual recordings and the right of
-    fixation and reproducing fixations of the Work, including storage of a
-    protected performance or phonogram in digital form or other electronic
-    medium.
-
-2. Fair Dealing Rights. Nothing in this License is intended to reduce,
-limit, or restrict any uses free from copyright or rights arising from
-limitations or exceptions that are provided for in connection with the
-copyright protection under copyright law or other applicable laws.
-
-3. License Grant. Subject to the terms and conditions of this License,
-Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
-perpetual (for the duration of the applicable copyright) license to
-exercise the rights in the Work as stated below:
-
- a. to Reproduce the Work, to incorporate the Work into one or more
-    Collections, and to Reproduce the Work as incorporated in the
-    Collections;
- b. to create and Reproduce Adaptations provided that any such Adaptation,
-    including any translation in any medium, takes reasonable steps to
-    clearly label, demarcate or otherwise identify that changes were made
-    to the original Work. For example, a translation could be marked "The
-    original work was translated from English to Spanish," or a
-    modification could indicate "The original work has been modified.";
- c. to Distribute and Publicly Perform the Work including as incorporated
-    in Collections; and,
- d. to Distribute and Publicly Perform Adaptations.
- e. For the avoidance of doubt:
-
-     i. Non-waivable Compulsory License Schemes. In those jurisdictions in
-        which the right to collect royalties through any statutory or
-        compulsory licensing scheme cannot be waived, the Licensor
-        reserves the exclusive right to collect such royalties for any
-        exercise by You of the rights granted under this License;
-    ii. Waivable Compulsory License Schemes. In those jurisdictions in
-        which the right to collect royalties through any statutory or
-        compulsory licensing scheme can be waived, the Licensor waives the
-        exclusive right to collect such royalties for any exercise by You
-        of the rights granted under this License; and,
-   iii. Voluntary License Schemes. The Licensor waives the right to
-        collect royalties, whether individually or, in the event that the
-        Licensor is a member of a collecting society that administers
-        voluntary licensing schemes, via that society, from any exercise
-        by You of the rights granted under this License.
-
-The above rights may be exercised in all media and formats whether now
-known or hereafter devised. The above rights include the right to make
-such modifications as are technically necessary to exercise the rights in
-other media and formats. Subject to Section 8(f), all rights not expressly
-granted by Licensor are hereby reserved.
-
-4. Restrictions. The license granted in Section 3 above is expressly made
-subject to and limited by the following restrictions:
-
- a. You may Distribute or Publicly Perform the Work only under the terms
-    of this License. You must include a copy of, or the Uniform Resource
-    Identifier (URI) for, this License with every copy of the Work You
-    Distribute or Publicly Perform. You may not offer or impose any terms
-    on the Work that restrict the terms of this License or the ability of
-    the recipient of the Work to exercise the rights granted to that
-    recipient under the terms of the License. You may not sublicense the
-    Work. You must keep intact all notices that refer to this License and
-    to the disclaimer of warranties with every copy of the Work You
-    Distribute or Publicly Perform. When You Distribute or Publicly
-    Perform the Work, You may not impose any effective technological
-    measures on the Work that restrict the ability of a recipient of the
-    Work from You to exercise the rights granted to that recipient under
-    the terms of the License. This Section 4(a) applies to the Work as
-    incorporated in a Collection, but this does not require the Collection
-    apart from the Work itself to be made subject to the terms of this
-    License. If You create a Collection, upon notice from any Licensor You
-    must, to the extent practicable, remove from the Collection any credit
-    as required by Section 4(c), as requested. If You create an
-    Adaptation, upon notice from any Licensor You must, to the extent
-    practicable, remove from the Adaptation any credit as required by
-    Section 4(c), as requested.
- b. You may Distribute or Publicly Perform an Adaptation only under the
-    terms of: (i) this License; (ii) a later version of this License with
-    the same License Elements as this License; (iii) a Creative Commons
-    jurisdiction license (either this or a later license version) that
-    contains the same License Elements as this License (e.g.,
-    Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible
-    License. If you license the Adaptation under one of the licenses
-    mentioned in (iv), you must comply with the terms of that license. If
-    you license the Adaptation under the terms of any of the licenses
-    mentioned in (i), (ii) or (iii) (the "Applicable License"), you must
-    comply with the terms of the Applicable License generally and the
-    following provisions: (I) You must include a copy of, or the URI for,
-    the Applicable License with every copy of each Adaptation You
-    Distribute or Publicly Perform; (II) You may not offer or impose any
-    terms on the Adaptation that restrict the terms of the Applicable
-    License or the ability of the recipient of the Adaptation to exercise
-    the rights granted to that recipient under the terms of the Applicable
-    License; (III) You must keep intact all notices that refer to the
-    Applicable License and to the disclaimer of warranties with every copy
-    of the Work as included in the Adaptation You Distribute or Publicly
-    Perform; (IV) when You Distribute or Publicly Perform the Adaptation,
-    You may not impose any effective technological measures on the
-    Adaptation that restrict the ability of a recipient of the Adaptation
-    from You to exercise the rights granted to that recipient under the
-    terms of the Applicable License. This Section 4(b) applies to the
-    Adaptation as incorporated in a Collection, but this does not require
-    the Collection apart from the Adaptation itself to be made subject to
-    the terms of the Applicable License.
- c. If You Distribute, or Publicly Perform the Work or any Adaptations or
-    Collections, You must, unless a request has been made pursuant to
-    Section 4(a), keep intact all copyright notices for the Work and
-    provide, reasonable to the medium or means You are utilizing: (i) the
-    name of the Original Author (or pseudonym, if applicable) if supplied,
-    and/or if the Original Author and/or Licensor designate another party
-    or parties (e.g., a sponsor institute, publishing entity, journal) for
-    attribution ("Attribution Parties") in Licensor's copyright notice,
-    terms of service or by other reasonable means, the name of such party
-    or parties; (ii) the title of the Work if supplied; (iii) to the
-    extent reasonably practicable, the URI, if any, that Licensor
-    specifies to be associated with the Work, unless such URI does not
-    refer to the copyright notice or licensing information for the Work;
-    and (iv) , consistent with Ssection 3(b), in the case of an
-    Adaptation, a credit identifying the use of the Work in the Adaptation
-    (e.g., "French translation of the Work by Original Author," or
-    "Screenplay based on original Work by Original Author"). The credit
-    required by this Section 4(c) may be implemented in any reasonable
-    manner; provided, however, that in the case of a Adaptation or
-    Collection, at a minimum such credit will appear, if a credit for all
-    contributing authors of the Adaptation or Collection appears, then as
-    part of these credits and in a manner at least as prominent as the
-    credits for the other contributing authors. For the avoidance of
-    doubt, You may only use the credit required by this Section for the
-    purpose of attribution in the manner set out above and, by exercising
-    Your rights under this License, You may not implicitly or explicitly
-    assert or imply any connection with, sponsorship or endorsement by the
-    Original Author, Licensor and/or Attribution Parties, as appropriate,
-    of You or Your use of the Work, without the separate, express prior
-    written permission of the Original Author, Licensor and/or Attribution
-    Parties.
- d. Except as otherwise agreed in writing by the Licensor or as may be
-    otherwise permitted by applicable law, if You Reproduce, Distribute or
-    Publicly Perform the Work either by itself or as part of any
-    Adaptations or Collections, You must not distort, mutilate, modify or
-    take other derogatory action in relation to the Work which would be
-    prejudicial to the Original Author's honor or reputation. Licensor
-    agrees that in those jurisdictions (e.g. Japan), in which any exercise
-    of the right granted in Section 3(b) of this License (the right to
-    make Adaptations) would be deemed to be a distortion, mutilation,
-    modification or other derogatory action prejudicial to the Original
-    Author's honor and reputation, the Licensor will waive or not assert,
-    as appropriate, this Section, to the fullest extent permitted by the
-    applicable national law, to enable You to reasonably exercise Your
-    right under Section 3(b) of this License (right to make Adaptations)
-    but not otherwise.
-
-5. Representations, Warranties and Disclaimer
-
-UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
-OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
-KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
-INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
-FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
-LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
-WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
-OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
-
-6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
-LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
-ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
-ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
-BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-7. Termination
-
- a. This License and the rights granted hereunder will terminate
-    automatically upon any breach by You of the terms of this License.
-    Individuals or entities who have received Adaptations or Collections
-    from You under this License, however, will not have their licenses
-    terminated provided such individuals or entities remain in full
-    compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
-    survive any termination of this License.
- b. Subject to the above terms and conditions, the license granted here is
-    perpetual (for the duration of the applicable copyright in the Work).
-    Notwithstanding the above, Licensor reserves the right to release the
-    Work under different license terms or to stop distributing the Work at
-    any time; provided, however that any such election will not serve to
-    withdraw this License (or any other license that has been, or is
-    required to be, granted under the terms of this License), and this
-    License will continue in full force and effect unless terminated as
-    stated above.
-
-8. Miscellaneous
-
- a. Each time You Distribute or Publicly Perform the Work or a Collection,
-    the Licensor offers to the recipient a license to the Work on the same
-    terms and conditions as the license granted to You under this License.
- b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
-    offers to the recipient a license to the original Work on the same
-    terms and conditions as the license granted to You under this License.
- c. If any provision of this License is invalid or unenforceable under
-    applicable law, it shall not affect the validity or enforceability of
-    the remainder of the terms of this License, and without further action
-    by the parties to this agreement, such provision shall be reformed to
-    the minimum extent necessary to make such provision valid and
-    enforceable.
- d. No term or provision of this License shall be deemed waived and no
-    breach consented to unless such waiver or consent shall be in writing
-    and signed by the party to be charged with such waiver or consent.
- e. This License constitutes the entire agreement between the parties with
-    respect to the Work licensed here. There are no understandings,
-    agreements or representations with respect to the Work not specified
-    here. Licensor shall not be bound by any additional provisions that
-    may appear in any communication from You. This License may not be
-    modified without the mutual written agreement of the Licensor and You.
- f. The rights granted under, and the subject matter referenced, in this
-    License were drafted utilizing the terminology of the Berne Convention
-    for the Protection of Literary and Artistic Works (as amended on
-    September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
-    Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
-    and the Universal Copyright Convention (as revised on July 24, 1971).
-    These rights and subject matter take effect in the relevant
-    jurisdiction in which the License terms are sought to be enforced
-    according to the corresponding provisions of the implementation of
-    those treaty provisions in the applicable national law. If the
-    standard suite of rights granted under applicable copyright law
-    includes additional rights not granted under this License, such
-    additional rights are deemed to be included in the License; this
-    License is not intended to restrict the license of any rights under
-    applicable law.
-
-
-Creative Commons Notice
-
-    Creative Commons is not a party to this License, and makes no warranty
-    whatsoever in connection with the Work. Creative Commons will not be
-    liable to You or any party on any legal theory for any damages
-    whatsoever, including without limitation any general, special,
-    incidental or consequential damages arising in connection to this
-    license. Notwithstanding the foregoing two (2) sentences, if Creative
-    Commons has expressly identified itself as the Licensor hereunder, it
-    shall have all rights and obligations of Licensor.
-
-    Except for the limited purpose of indicating to the public that the
-    Work is licensed under the CCPL, Creative Commons does not authorize
-    the use by either party of the trademark "Creative Commons" or any
-    related trademark or logo of Creative Commons without the prior
-    written consent of Creative Commons. Any permitted use will be in
-    compliance with Creative Commons' then-current trademark usage
-    guidelines, as may be published on its website or otherwise made
-    available upon request from time to time. For the avoidance of doubt,
-    this trademark restriction does not form part of the License.
-
-    Creative Commons may be contacted at https://creativecommons.org/.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/jasmine-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/jasmine-LICENSE.txt b/docs/license/jasmine-LICENSE.txt
deleted file mode 100644
index 7f279f7..0000000
--- a/docs/license/jasmine-LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-Copyright (c) 2008-2014 Pivotal Labs
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/license/jquery-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/jquery-LICENSE.txt b/docs/license/jquery-LICENSE.txt
deleted file mode 100644
index 1f928fd..0000000
--- a/docs/license/jquery-LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2005, 2014 jQuery Foundation, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/pom.xml
----------------------------------------------------------------------
diff --git a/docs/pom.xml b/docs/pom.xml
deleted file mode 100644
index 766f408..0000000
--- a/docs/pom.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-  
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-docs</artifactId>
-    <description>Apache Falcon Documentation</description>
-    <name>Apache Falcon Documentation</name>
-
-    <properties>
-        <skipTests>true</skipTests>
-    </properties>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-site-plugin</artifactId>
-                <dependencies>
-                    <dependency>
-                        <groupId>org.apache.maven.doxia</groupId>
-                        <artifactId>doxia-module-twiki</artifactId>
-                        <version>1.6</version>
-                    </dependency>
-                </dependencies>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>site</goal>
-                        </goals>
-                        <phase>prepare-package</phase>
-                    </execution>
-                </executions>
-                <configuration>
-                    <generateProjectInfo>false</generateProjectInfo>
-                    <generateReports>false</generateReports>
-                    <skip>false</skip>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/Architecture.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/Architecture.png b/docs/src/site/resources/Architecture.png
deleted file mode 100644
index 0378b49..0000000
Binary files a/docs/src/site/resources/Architecture.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/EntityDependency.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/EntityDependency.png b/docs/src/site/resources/EntityDependency.png
deleted file mode 100644
index 9f11870..0000000
Binary files a/docs/src/site/resources/EntityDependency.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/FeedSchedule.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/FeedSchedule.png b/docs/src/site/resources/FeedSchedule.png
deleted file mode 100644
index 105c6b1..0000000
Binary files a/docs/src/site/resources/FeedSchedule.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/PrismSetup.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/PrismSetup.png b/docs/src/site/resources/PrismSetup.png
deleted file mode 100644
index b0dc9a5..0000000
Binary files a/docs/src/site/resources/PrismSetup.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/ProcessSchedule.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/ProcessSchedule.png b/docs/src/site/resources/ProcessSchedule.png
deleted file mode 100644
index a7dd788..0000000
Binary files a/docs/src/site/resources/ProcessSchedule.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/accessories-text-editor.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/accessories-text-editor.png b/docs/src/site/resources/images/accessories-text-editor.png
deleted file mode 100644
index abc3366..0000000
Binary files a/docs/src/site/resources/images/accessories-text-editor.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/add.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/add.gif b/docs/src/site/resources/images/add.gif
deleted file mode 100644
index 1cb3dbf..0000000
Binary files a/docs/src/site/resources/images/add.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/apache-incubator-logo.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/apache-incubator-logo.png b/docs/src/site/resources/images/apache-incubator-logo.png
deleted file mode 100644
index 81fb31e..0000000
Binary files a/docs/src/site/resources/images/apache-incubator-logo.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/apache-maven-project-2.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/apache-maven-project-2.png b/docs/src/site/resources/images/apache-maven-project-2.png
deleted file mode 100644
index 6c096ec..0000000
Binary files a/docs/src/site/resources/images/apache-maven-project-2.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/application-certificate.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/application-certificate.png b/docs/src/site/resources/images/application-certificate.png
deleted file mode 100644
index cc6aff6..0000000
Binary files a/docs/src/site/resources/images/application-certificate.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/contact-new.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/contact-new.png b/docs/src/site/resources/images/contact-new.png
deleted file mode 100644
index ebc4316..0000000
Binary files a/docs/src/site/resources/images/contact-new.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/document-properties.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/document-properties.png b/docs/src/site/resources/images/document-properties.png
deleted file mode 100644
index 34c2409..0000000
Binary files a/docs/src/site/resources/images/document-properties.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/drive-harddisk.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/drive-harddisk.png b/docs/src/site/resources/images/drive-harddisk.png
deleted file mode 100644
index d7ce475..0000000
Binary files a/docs/src/site/resources/images/drive-harddisk.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/falcon-logo.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/falcon-logo.png b/docs/src/site/resources/images/falcon-logo.png
deleted file mode 100644
index 0a9f6cf..0000000
Binary files a/docs/src/site/resources/images/falcon-logo.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/fix.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/fix.gif b/docs/src/site/resources/images/fix.gif
deleted file mode 100644
index b7eb3dc..0000000
Binary files a/docs/src/site/resources/images/fix.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/icon_error_sml.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/icon_error_sml.gif b/docs/src/site/resources/images/icon_error_sml.gif
deleted file mode 100644
index 12e9a01..0000000
Binary files a/docs/src/site/resources/images/icon_error_sml.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/icon_help_sml.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/icon_help_sml.gif b/docs/src/site/resources/images/icon_help_sml.gif
deleted file mode 100644
index aaf20e6..0000000
Binary files a/docs/src/site/resources/images/icon_help_sml.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/icon_info_sml.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/icon_info_sml.gif b/docs/src/site/resources/images/icon_info_sml.gif
deleted file mode 100644
index b776326..0000000
Binary files a/docs/src/site/resources/images/icon_info_sml.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/icon_success_sml.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/icon_success_sml.gif b/docs/src/site/resources/images/icon_success_sml.gif
deleted file mode 100644
index 0a19527..0000000
Binary files a/docs/src/site/resources/images/icon_success_sml.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/icon_warning_sml.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/icon_warning_sml.gif b/docs/src/site/resources/images/icon_warning_sml.gif
deleted file mode 100644
index ac6ad6a..0000000
Binary files a/docs/src/site/resources/images/icon_warning_sml.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/image-x-generic.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/image-x-generic.png b/docs/src/site/resources/images/image-x-generic.png
deleted file mode 100644
index ab49efb..0000000
Binary files a/docs/src/site/resources/images/image-x-generic.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/internet-web-browser.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/internet-web-browser.png b/docs/src/site/resources/images/internet-web-browser.png
deleted file mode 100644
index 307d6ac..0000000
Binary files a/docs/src/site/resources/images/internet-web-browser.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/logos/build-by-maven-black.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/logos/build-by-maven-black.png b/docs/src/site/resources/images/logos/build-by-maven-black.png
deleted file mode 100644
index 919fd0f..0000000
Binary files a/docs/src/site/resources/images/logos/build-by-maven-black.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/logos/build-by-maven-white.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/logos/build-by-maven-white.png b/docs/src/site/resources/images/logos/build-by-maven-white.png
deleted file mode 100644
index 7d44c9c..0000000
Binary files a/docs/src/site/resources/images/logos/build-by-maven-white.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/logos/maven-feather.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/logos/maven-feather.png b/docs/src/site/resources/images/logos/maven-feather.png
deleted file mode 100644
index b5ada83..0000000
Binary files a/docs/src/site/resources/images/logos/maven-feather.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/network-server.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/network-server.png b/docs/src/site/resources/images/network-server.png
deleted file mode 100644
index 1d12e19..0000000
Binary files a/docs/src/site/resources/images/network-server.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/package-x-generic.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/package-x-generic.png b/docs/src/site/resources/images/package-x-generic.png
deleted file mode 100644
index 8b7e9e6..0000000
Binary files a/docs/src/site/resources/images/package-x-generic.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/profiles/pre-release.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/profiles/pre-release.png b/docs/src/site/resources/images/profiles/pre-release.png
deleted file mode 100644
index d448e85..0000000
Binary files a/docs/src/site/resources/images/profiles/pre-release.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/profiles/retired.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/profiles/retired.png b/docs/src/site/resources/images/profiles/retired.png
deleted file mode 100644
index f89f6a2..0000000
Binary files a/docs/src/site/resources/images/profiles/retired.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/profiles/sandbox.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/profiles/sandbox.png b/docs/src/site/resources/images/profiles/sandbox.png
deleted file mode 100644
index f88b362..0000000
Binary files a/docs/src/site/resources/images/profiles/sandbox.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/remove.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/remove.gif b/docs/src/site/resources/images/remove.gif
deleted file mode 100644
index fc65631..0000000
Binary files a/docs/src/site/resources/images/remove.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/rss.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/rss.png b/docs/src/site/resources/images/rss.png
deleted file mode 100644
index a9850ee..0000000
Binary files a/docs/src/site/resources/images/rss.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/update.gif
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/update.gif b/docs/src/site/resources/images/update.gif
deleted file mode 100644
index b2a6d0b..0000000
Binary files a/docs/src/site/resources/images/update.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/resources/images/window-new.png
----------------------------------------------------------------------
diff --git a/docs/src/site/resources/images/window-new.png b/docs/src/site/resources/images/window-new.png
deleted file mode 100644
index 0e12ef9..0000000
Binary files a/docs/src/site/resources/images/window-new.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/site.xml
----------------------------------------------------------------------
diff --git a/docs/src/site/site.xml b/docs/src/site/site.xml
deleted file mode 100644
index aeb7a5e..0000000
--- a/docs/src/site/site.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-  
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project name="Falcon" xmlns="http://maven.apache.org/DECORATION/1.3.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/DECORATION/1.3.0 http://maven.apache.org/xsd/decoration-1.3.0.xsd">
-
-    <skin>
-        <groupId>org.apache.maven.skins</groupId>
-        <artifactId>maven-fluido-skin</artifactId>
-        <version>1.3.0</version>
-    </skin>
-
-    <custom>
-        <fluidoSkin>
-            <project>Apache Falcon</project>
-            <sideBarEnabled>false</sideBarEnabled>
-        </fluidoSkin>
-    </custom>
-
-    <bannerLeft>
-        <name>Apache Falcon</name>
-        <src>./images/falcon-logo.png</src>
-        <width>200px</width>
-        <height>45px</height>
-    </bannerLeft>
-
-    <publishDate position="right"/>
-    <version position="right"/>
-
-    <body>
-        <head>
-            <script type="text/javascript">
-                $( document ).ready( function() { $( '.carousel' ).carousel( { interval: 3500 } ) } );
-            </script>
-        </head>
-
-        <breadcrumbs position="left">
-            <item name="Falcon" title="Apache Falcon" href="index.html"/>
-        </breadcrumbs>
-
-        <footer>
-            © 2011-2012 The Apache Software Foundation. Apache Falcon, Falcon, Apache, the Apache feather logo,
-            and the Apache Falcon project logo are trademarks of The Apache Software Foundation.
-        </footer>
-    </body>
-</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Appendix.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Appendix.twiki b/docs/src/site/twiki/Appendix.twiki
deleted file mode 100644
index e3752fb..0000000
--- a/docs/src/site/twiki/Appendix.twiki
+++ /dev/null
@@ -1,55 +0,0 @@
----+ Compatibility
-
----++ 0.6-incubating Version Compatibility Matrix
-
-   * Hadoop 2.5.0 and above
-   * Oozie  4.0.0 and above
-   * Hive 0.11.0 and above
-   * HCatalog 0.11.0 and above
-   * Active MQ 5.4.3 and above
-   * Titan 0.4.2 and above but below 0.5
-
-
----++ 0.6-incubating Tested Compatibility
-
-   * Hadoop 2.6.0
-   * Oozie  4.1.0
-   * Hive 0.14.0
-   * HCatalog 0.14.0
-   * Active MQ 5.4.3
-   * Titan 0.4.2
-   * Java 1.6, Java 1.7
-
-   Note : Oozie versions below 4.1.0 are not compatible with Java 1.7
-
----++ 0.6-incubating Release Notes
-
-Major additions are listed below. Refer to CHANGES.txt for detailed issues addressed in this release.
-
-   * Security - Authorization, SSL
-   * Lineage - More complete with better API
-   * Recipes
-   * Usability improvements - Dry run, entity summary, Pagination, etc.
-   * Operability - Alerts, Audits, etc.
-   * Refactoring - Messaging, Orchestration of workflows, etc.
-   * Extension points for developers
-   * Many bug fixes
-
-
----++ 0.6-incubating Upgrade Instructions
-
-Please follow these instructions when upgrading from an older release.
-
----+++ Upgrading from 0.5-incubating
-
-0.6-incubating is backwards *incompatible* with 0.5-incubating. It is recommended that users do not
-migrate from 0.5 to 0.6. However, if you must migrate from 0.5-incubating to 0.6-incubating,
-you should [[https://cwiki.apache.org/confluence/display/FALCON/Index][follow these instructions]].
-
----+++ Upgrading from 0.4-incubating
-
-It is not possible to upgrade to 0.6-incubating from 0.4-incubating.
-
----+++ Upgrading from 0.3-incubating
-
-It is not possible to upgrade to 0.6-incubating from 0.3-incubating.

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Configuration.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Configuration.twiki b/docs/src/site/twiki/Configuration.twiki
deleted file mode 100644
index 0df094f..0000000
--- a/docs/src/site/twiki/Configuration.twiki
+++ /dev/null
@@ -1,122 +0,0 @@
----+Configuring Falcon
-
-By default, the config directory used by falcon is {package dir}/conf. To override this (to use the same conf with multiple
-falcon upgrades), set the environment variable FALCON_CONF to the path of the conf dir.
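-
-For example, assuming a hypothetical conf location of /etc/falcon/conf, you could set:
-<verbatim>
-export FALCON_CONF=/etc/falcon/conf
-</verbatim>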
-
-falcon-env.sh has been added to the falcon conf. This file can be used to set various environment variables that
-your services need.
-In addition, you can set any other environment variables you might need. This file will be sourced by the falcon scripts
-before any commands are executed. The following environment variables are available to set.
-
-<verbatim>
-# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
-#export JAVA_HOME=
-
-# any additional java opts you want to set. This will apply to both client and server operations
-#export FALCON_OPTS=
-
-# any additional java opts that you want to set for client only
-#export FALCON_CLIENT_OPTS=
-
-# java heap size we want to set for the client. Default is 1024MB
-#export FALCON_CLIENT_HEAP=
-
-# any additional opts you want to set for prism service.
-#export FALCON_PRISM_OPTS=
-
-# java heap size we want to set for the prism service. Default is 1024MB
-#export FALCON_PRISM_HEAP=
-
-# any additional opts you want to set for falcon service.
-#export FALCON_SERVER_OPTS=
-
-# java heap size we want to set for the falcon server. Default is 1024MB
-#export FALCON_SERVER_HEAP=
-
-# What is considered the falcon home dir. Default is the base location of the installed software
-#export FALCON_HOME_DIR=
-
-# Where log files are stored. Default is logs directory under the base install location
-#export FALCON_LOG_DIR=
-
-# Where pid files are stored. Default is logs directory under the base install location
-#export FALCON_PID_DIR=
-
-# where the falcon active mq data is stored. Default is logs/data directory under the base install location
-#export FALCON_DATA_DIR=
-
-# Where do you want to expand the war file. By default it is in the /server/webapp dir under the base install dir.
-#export FALCON_EXPANDED_WEBAPP_DIR=
-</verbatim>
-
----++Advanced Configurations
-
----+++Configuring Monitoring plugin to register catalog partitions
-Falcon comes with a monitoring plugin that registers catalog partitions. This comes in handy during migration from
-filesystem based feeds to hcatalog based feeds.
-This plugin lets the user decouple partition registration from the migration and assume that all partitions are already in
-hcatalog even before the migration, simplifying the move to hcatalog.
-
-By default this plugin is disabled.
-To enable this plugin and leverage the feature, there are 3 pre-requisites:
-<verbatim>
-In {package dir}/conf/startup.properties, add
-*.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandler
-
-In the cluster definition, ensure registry endpoint is defined.
-Ex:
-<interface type="registry" endpoint="thrift://localhost:1109" version="0.13.3"/>
-
-In the feed definition, ensure the corresponding catalog table is mentioned in feed-properties
-Ex:
-<properties>
-    <property name="catalog.table" value="catalog:default:in_table#year={YEAR};month={MONTH};day={DAY};hour={HOUR};
-    minute={MINUTE}"/>
-</properties>
-</verbatim>
-
-*NOTE : for Mac OS users*
-<verbatim>
-If you are using Mac OS, you will need to configure FALCON_SERVER_OPTS (explained above).
-
-In  {package dir}/conf/falcon-env.sh uncomment the following line
-#export FALCON_SERVER_OPTS=
-
-and change it to look as below
-export FALCON_SERVER_OPTS="-Djava.awt.headless=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc="
-</verbatim>
-
----+++Activemq
-
-* The falcon server starts an embedded ActiveMQ broker. To control this behaviour, set the following system properties using the -D
-option in the environment variable FALCON_OPTS (see the example below):
-   * falcon.embeddedmq=<true/false> - Should the server start embedded ActiveMQ, default true
-   * falcon.embeddedmq.port=<port> - Port for embedded ActiveMQ, default 61616
-   * falcon.embeddedmq.data=<path> - Data path for embedded ActiveMQ, default {package dir}/logs/data
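-
-For example, a minimal sketch (the port and data path below are illustrative placeholders, not required values):
-<verbatim>
-# run without the embedded broker
-export FALCON_OPTS="-Dfalcon.embeddedmq=false"
-
-# or keep the embedded broker on a different port and data path
-export FALCON_OPTS="-Dfalcon.embeddedmq=true -Dfalcon.embeddedmq.port=61617 -Dfalcon.embeddedmq.data=/tmp/falcon-mq-data"
-</verbatim>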
-
----+++Falcon System Notifications
-Some Falcon features such as late data handling, retries, and the metadata service depend on JMS notifications sent when the Oozie workflow completes. These system notifications are sent as part of the Falcon post-processing action. Given that the post-processing action is itself a job, it is prone to failures, and when it fails Falcon is blind to the status of the workflow. To alleviate this problem and make the notifications more reliable, you can enable Oozie's JMS notification feature and disable Falcon post-processing notification by making the following changes:
-   * In Falcon runtime.properties, set *.falcon.jms.notification.enabled to false (see the sketch below). This will turn off JMS notification in post-processing.
-   * Copy the notification related properties in oozie/conf/oozie-site.xml to the oozie-site.xml of the Oozie installation. Restart Oozie so the changes get reflected.
-
-*NOTE : If you disable Falcon post-processing JMS notification and do not enable Oozie JMS notification, features such as failure retry, late data handling and the metadata service will be disabled for all entities on the server.*
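-
-A minimal sketch of the runtime.properties change described above:
-<verbatim>
-*.falcon.jms.notification.enabled=false
-</verbatim>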
-
----+++Enabling Falcon Native Scheduler
-You can either choose to schedule entities using Oozie's coordinator or using Falcon's native scheduler. To be able to schedule entities natively on Falcon, you will need to add some additional properties to <verbatim>$FALCON_HOME/conf/startup.properties</verbatim> before starting the Falcon Server. For details on the same, refer to [[FalconNativeScheduler][Falcon Native Scheduler]]
-
----+++Adding Extension Libraries
-
-Library extensions allow users to add custom libraries to entity lifecycles such as feed retention, feed replication
-and process execution. This is useful for use cases such as adding filesystem extensions. To enable this, add the
-following configs to startup.properties:
-*.libext.paths=<paths to be added to all entity lifecycles>
-
-*.libext.feed.paths=<paths to be added to all feed lifecycles>
-
-*.libext.feed.retentions.paths=<paths to be added to feed retention workflow>
-
-*.libext.feed.replication.paths=<paths to be added to feed replication workflow>
-
-*.libext.process.paths=<paths to be added to process workflow>
-
-The configured jars are added to the falcon classpath and to the corresponding workflows.
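-
-A minimal illustrative startup.properties sketch (the paths below are placeholders, not shipped defaults):
-<verbatim>
-*.libext.paths=/projects/falcon/libext/common
-*.libext.feed.replication.paths=/projects/falcon/libext/replication
-*.libext.process.paths=/projects/falcon/libext/process
-</verbatim>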

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Distributed-mode.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Distributed-mode.twiki b/docs/src/site/twiki/Distributed-mode.twiki
deleted file mode 100644
index 34fb092..0000000
--- a/docs/src/site/twiki/Distributed-mode.twiki
+++ /dev/null
@@ -1,198 +0,0 @@
----+Distributed Mode
-
-
-Following are the steps needed to package and deploy Falcon in Distributed Mode. You need to complete Steps 1-3 mentioned
- [[InstallationSteps][here]] before proceeding further.
-
----++Package Falcon
-Ensure that you are in the base directory (where you cloned Falcon). Let’s call it {project dir}
-
-<verbatim>
-$mvn clean assembly:assembly -DskipTests -DskipCheck=true -Pdistributed,hadoop-2
-</verbatim>
-
-
-<verbatim>
-$ls {project dir}/distro/target/
-</verbatim>
-
-It should give an output like below:
-<verbatim>
-apache-falcon-distributed-${project.version}-server.tar.gz
-apache-falcon-distributed-${project.version}-sources.tar.gz
-archive-tmp
-maven-shared-archive-resources
-</verbatim>
-
-   * apache-falcon-distributed-${project.version}-sources.tar.gz contains source files of Falcon repo.
-
-   * apache-falcon-distributed-${project.version}-server.tar.gz package contains project artifacts along with its
-dependencies, configuration files and scripts required to deploy Falcon.
-
-
-The tar can be found in {project dir}/distro/target/apache-falcon-distributed-${project.version}-server.tar.gz. This is the tar
-used for installing Falcon. Let's call it {falcon package}.
-
-Tar is structured as follows.
-
-<verbatim>
-
-|- bin
-   |- falcon
-   |- falcon-start
-   |- falcon-stop
-   |- falcon-status
-   |- falcon-config.sh
-   |- service-start.sh
-   |- service-stop.sh
-   |- service-status.sh
-   |- prism-stop
-   |- prism-start
-   |- prism-status
-|- conf
-   |- startup.properties
-   |- runtime.properties
-   |- client.properties
-   |- prism.keystore
-   |- log4j.xml
-   |- falcon-env.sh
-|- docs
-|- client
-   |- lib (client support libs)
-|- server
-   |- webapp
-      |- falcon.war
-      |- prism.war
-|- oozie
-   |- conf
-   |- libext
-|- hadooplibs
-|- README
-|- NOTICE.txt
-|- LICENSE.txt
-|- DISCLAIMER.txt
-|- CHANGES.txt
-</verbatim>
-
-
----++Installing & running Falcon
-
----+++Installing Falcon
-
-Running Falcon in distributed mode requires bringing up both prism and server. As the name suggests, Falcon prism splits
-the requests it gets across the Falcon servers. It is good practice to start prism and server with their corresponding
-configurations separately. Create separate directories for prism and server. Let's call them {falcon-prism-dir} and
-{falcon-server-dir} respectively.
-
-*For prism*
-<verbatim>
-$mkdir {falcon-prism-dir}
-$tar -xzvf {falcon package}
-</verbatim>
-
-*For server*
-<verbatim>
-$mkdir {falcon-server-dir}
-$tar -xzvf {falcon package}
-</verbatim>
-
-
----+++Starting Prism
-
-<verbatim>
-cd {falcon-prism-dir}/falcon-distributed-${project.version}
-bin/prism-start [-port <port>]
-</verbatim>
-
-By default,
-* The prism server starts at port 16443. To change the port, use the -port option.
-
-* falcon.enableTLS can be set to true or false explicitly to enable or disable SSL; if it is not set, a port that ends with 443 will
-automatically put prism on https://
-
-* prism starts with conf from {falcon-prism-dir}/falcon-distributed-${project.version}/conf. To override this (to use
-the same conf with multiple prism upgrades), set environment variable FALCON_CONF to the path of conf dir. You can find
-the instructions for configuring Falcon [[Configuration][here]].
-
-*Enabling prism-client*
-If prism is not started using the default port 16443, then edit the following property in
-{falcon-prism-dir}/falcon-distributed-${project.version}/conf/client.properties (see the example below):
-falcon.url=http://{machine-ip}:{prism-port}/
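-
-For example, if prism were started on a hypothetical host 192.168.1.10 with "-port 16000", client.properties would contain:
-<verbatim>
-falcon.url=http://192.168.1.10:16000/
-</verbatim>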
-
-
----+++Starting Falcon Server
-
-<verbatim>
-$cd {falcon-server-dir}/falcon-distributed-${project.version}
-$bin/falcon-start [-port <port>]
-</verbatim>
-
-By default,
-* If falcon.enableTLS is set to true explicitly or not set at all, Falcon starts at port 15443 on https:// by default.
-
-* If falcon.enableTLS is set to false explicitly, Falcon starts at port 15000 on http://.
-
-* To change the port, use -port option.
-
-* If falcon.enableTLS is not set explicitly, a port that ends with 443 will automatically put Falcon on https://. Any
-other port will put Falcon on http://.
-
-* server starts with conf from {falcon-server-dir}/falcon-distributed-${project.version}/conf. To override this (to use
-the same conf with multiple server upgrades), set environment variable FALCON_CONF to the path of conf dir. You can find
- the instructions for configuring Falcon [[Configuration][here]].
-
-*Enabling server-client*
-If the server is not started using the default port 15443, then edit the following property in
-{falcon-server-dir}/falcon-distributed-${project.version}/conf/client.properties. You can find the instructions for
-configuring Falcon [[Configuration][here]].
-falcon.url=http://{machine-ip}:{server-port}/
-
-*NOTE* : https is the secure version of HTTP, the protocol over which data is sent between your browser and the website
-that you are connected to. By default Falcon runs in https mode, but users can configure it to use http.
-
-
----+++Using Falcon
-
-<verbatim>
-$cd {falcon-prism-dir}/falcon-distributed-${project.version}
-$bin/falcon admin -version
-Falcon server build version: {Version:"${project.version}-SNAPSHOT-rd7e2be9afa2a5dc96acd1ec9e325f39c6b2f17f7",
-Mode:"embedded"}
-
-$bin/falcon help
-(for more details about Falcon cli usage)
-</verbatim>
-
-
----+++Dashboard
-
-Once Falcon / prism is started, you can view the status of Falcon entities using the Web-based dashboard. You can open
-your browser at the corresponding port to use the web UI.
-
-Falcon dashboard makes the REST api calls as user "falcon-dashboard". If this user does not exist on your Falcon and
-Oozie servers, please create the user.
-
-<verbatim>
-## create user.
-[root@falconhost ~] useradd -U -m falcon-dashboard -G users
-
-## verify user is created with membership in correct groups.
-[root@falconhost ~] groups falcon-dashboard
-falcon-dashboard : falcon-dashboard users
-[root@falconhost ~]
-</verbatim>
-
-
----+++Stopping Falcon Server
-
-<verbatim>
-$cd {falcon-server-dir}/falcon-distributed-${project.version}
-$bin/falcon-stop
-</verbatim>
-
----+++Stopping Falcon Prism
-
-<verbatim>
-$cd {falcon-prism-dir}/falcon-distributed-${project.version}
-$bin/prism-stop
-</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/docs/src/site/twiki/Embedded-mode.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Embedded-mode.twiki b/docs/src/site/twiki/Embedded-mode.twiki
deleted file mode 100644
index d5c37a1..0000000
--- a/docs/src/site/twiki/Embedded-mode.twiki
+++ /dev/null
@@ -1,198 +0,0 @@
----+Embedded Mode
-
-Following are the steps needed to package and deploy Falcon in Embedded Mode. You need to complete Steps 1-3 mentioned
- [[InstallationSteps][here]] before proceeding further.
-
----++Package Falcon
-Ensure that you are in the base directory (where you cloned Falcon). Let’s call it {project dir}
-
-<verbatim>
-$mvn clean assembly:assembly -DskipTests -DskipCheck=true
-</verbatim>
-
-<verbatim>
-$ls {project dir}/distro/target/
-</verbatim>
-It should give an output like below:
-<verbatim>
-apache-falcon-${project.version}-bin.tar.gz
-apache-falcon-${project.version}-sources.tar.gz
-archive-tmp
-maven-shared-archive-resources
-</verbatim>
-
-* apache-falcon-${project.version}-sources.tar.gz contains source files of Falcon repo.
-
-* apache-falcon-${project.version}-bin.tar.gz package contains project artifacts along with its dependencies,
-configuration files and scripts required to deploy Falcon.
-
-The tar can be found in {project dir}/distro/target/apache-falcon-${project.version}-bin.tar.gz
-
-Tar is structured as follows :
-
-<verbatim>
-
-|- bin
-   |- falcon
-   |- falcon-start
-   |- falcon-stop
-   |- falcon-status
-   |- falcon-config.sh
-   |- service-start.sh
-   |- service-stop.sh
-   |- service-status.sh
-|- conf
-   |- startup.properties
-   |- runtime.properties
-   |- prism.keystore
-   |- client.properties
-   |- log4j.xml
-   |- falcon-env.sh
-|- docs
-|- client
-   |- lib (client support libs)
-|- server
-   |- webapp
-      |- falcon.war
-|- data
-   |- falcon-store
-   |- graphdb
-   |- localhost
-|- examples
-   |- app
-      |- hive
-      |- oozie-mr
-      |- pig
-   |- data
-   |- entity
-      |- filesystem
-      |- hcat
-|- oozie
-   |- conf
-   |- libext
-|- logs
-|- hadooplibs
-|- README
-|- NOTICE.txt
-|- LICENSE.txt
-|- DISCLAIMER.txt
-|- CHANGES.txt
-</verbatim>
-
-
----++Installing & running Falcon
-
-Running Falcon in embedded mode requires bringing up server.
-
-<verbatim>
-$tar -xzvf {falcon package}
-$cd falcon-${project.version}
-</verbatim>
-
-
----+++Starting Falcon Server
-<verbatim>
-$cd falcon-${project.version}
-$bin/falcon-start [-port <port>]
-</verbatim>
-
-By default,
-* If falcon.enableTLS is set to true explicitly or not set at all, Falcon starts at port 15443 on https:// by default.
-
-* If falcon.enableTLS is set to false explicitly, Falcon starts at port 15000 on http://.
-
-* To change the port, use -port option.
-
-* If falcon.enableTLS is not set explicitly, a port that ends with 443 will automatically put Falcon on https://. Any
-other port will put Falcon on http://.
-
-* The server starts with conf from {falcon-server-dir}/falcon-${project.version}/conf. To override this (to use
-the same conf with multiple server upgrades), set the environment variable FALCON_CONF to the path of the conf dir. You can find
-the instructions for configuring Falcon [[Configuration][here]].
-
-
----+++Enabling server-client
-If the server is not started using the default port 15443, then edit the following property in
-{falcon-server-dir}/falcon-${project.version}/conf/client.properties (see the example below)
-
-falcon.url=http://{machine-ip}:{server-port}/
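-
-For example, if the server were started on a hypothetical host 192.168.1.10 with "-port 15000", client.properties would contain:
-<verbatim>
-falcon.url=http://192.168.1.10:15000/
-</verbatim>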
-
-
----+++Using Falcon
-<verbatim>
-$cd falcon-${project.version}
-$bin/falcon admin -version
-Falcon server build version: {Version:"${project.version}-SNAPSHOT-rd7e2be9afa2a5dc96acd1ec9e325f39c6b2f17f7",Mode:
-"embedded",Hadoop:"${hadoop.version}"}
-
-$bin/falcon help
-(for more details about Falcon cli usage)
-</verbatim>
-
-*Note* : https is the secure version of HTTP, the protocol over which data is sent between your browser and the website
-that you are connected to. By default Falcon runs in https mode, but users can configure it to use http.
-
-
----+++Dashboard
-
-Once Falcon server is started, you can view the status of Falcon entities using the Web-based dashboard. You can open
-your browser at the corresponding port to use the web UI.
-
-Falcon dashboard makes the REST api calls as user "falcon-dashboard". If this user does not exist on your Falcon and
-Oozie servers, please create the user.
-
-<verbatim>
-## create user.
-[root@falconhost ~] useradd -U -m falcon-dashboard -G users
-
-## verify user is created with membership in correct groups.
-[root@falconhost ~] groups falcon-dashboard
-falcon-dashboard : falcon-dashboard users
-[root@falconhost ~]
-</verbatim>
-
-
----++Running Examples using embedded package
-<verbatim>
-$cd falcon-${project.version}
-$bin/falcon-start
-</verbatim>
-Make sure the Hadoop and Oozie endpoints in
-examples/entity/filesystem/standalone-cluster.xml match your setup.
-The cluster locations, the staging and working dirs, MUST be created prior to submitting a cluster entity to Falcon.
-*staging* must have 777 permissions and the parent dirs must have execute permissions.
-*working* must have 755 permissions and the parent dirs must have execute permissions.
-<verbatim>
-$bin/falcon entity -submit -type cluster -file examples/entity/filesystem/standalone-cluster.xml
-</verbatim>
-Submit input and output feeds:
-<verbatim>
-$bin/falcon entity -submit -type feed -file examples/entity/filesystem/in-feed.xml
-$bin/falcon entity -submit -type feed -file examples/entity/filesystem/out-feed.xml
-</verbatim>
-Set-up workflow for the process:
-<verbatim>
-$hadoop fs -put examples/app /
-</verbatim>
-Submit and schedule the process:
-<verbatim>
-$bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/oozie-mr-process.xml
-$bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/pig-process.xml
-</verbatim>
-Generate input data:
-<verbatim>
-$examples/data/generate.sh <<hdfs endpoint>>
-</verbatim>
-Get status of instances:
-<verbatim>
-$bin/falcon instance -status -type process -name oozie-mr-process -start 2013-11-15T00:05Z -end 2013-11-15T01:00Z
-</verbatim>
-
-HCat based example entities are in examples/entity/hcat.
-
-
----+++Stopping Falcon Server
-<verbatim>
-$cd falcon-${project.version}
-$bin/falcon-stop
-</verbatim>


[05/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitTest.java
deleted file mode 100644
index 53f3df8..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSubmitTest.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-
-import org.apache.commons.httpclient.HttpStatus;
-import org.apache.falcon.entity.v0.feed.LocationType;
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed submission tests.
- */
-@Test(groups = "embedded")
-public class FeedSubmitTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private String feed;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0].generateUniqueBundle(this);
-        bundles[0] = new Bundle(bundles[0], cluster);
-
-        //submit the cluster
-        ServiceResponse response =
-            prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0));
-        AssertUtil.assertSucceeded(response);
-        feed = bundles[0].getInputFeedFromBundle();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Submit correctly adjusted feed. Response should reflect success.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void submitValidFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Submit and remove feed. Try to submit it again. Response should reflect success.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void submitValidFeedPostDeletion() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().delete(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Submit feed. Get its definition. Try to submit it again. Should succeed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void submitValidFeedPostGet() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().getEntityDefinition(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Try to submit correctly adjusted feed twice. Should succeed.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void submitValidFeedTwice() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Submit a feed with the path for location-data type empty. Feed submit should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void submitFeedWithEmptyDataPath() throws Exception {
-        FeedMerlin feedObj = new FeedMerlin(feed);
-        feedObj.setLocation(LocationType.DATA, "");
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedObj.toString());
-        AssertUtil.assertFailedWithStatus(response, HttpStatus.SC_BAD_REQUEST,
-                "Can not create a Path from an empty string");
-    }
-
-    /**
-     * Submit a feed with no location type stats. Feed submit should succeed.
-     *
-     * @throws Exception
-     */
-
-    @Test(groups = {"singleCluster"})
-    public void submitFeedWithNoStatsPath() throws Exception {
-        FeedMerlin feedObj = new FeedMerlin(feed);
-        feedObj.getLocations().getLocations().set(1, null);
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedObj.toString());
-        AssertUtil.assertSucceeded(response);
-    }
-
-    /**
-     * Submit a feed with no location type data. Feed submit should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void submitFeedWithNoDataPath() throws Exception {
-        FeedMerlin feedObj = new FeedMerlin(feed);
-        feedObj.getLocations().getLocations().set(0, null);
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feedObj.toString());
-        AssertUtil.assertFailedWithStatus(response, HttpStatus.SC_BAD_REQUEST,
-                "FileSystem based feed but it doesn't contain location type - data");
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSuspendTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSuspendTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSuspendTest.java
deleted file mode 100644
index 5217818..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/FeedSuspendTest.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.response.ServiceResponse;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-
-/**
- * Feed suspend tests.
- */
-@Test(groups = "embedded")
-public class FeedSuspendTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String feed;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        bundles[0] = BundleUtil.readELBundle();
-        bundles[0].generateUniqueBundle(this);
-        bundles[0] = new Bundle(bundles[0], cluster);
-
-        //submit the cluster
-        ServiceResponse response =
-            prism.getClusterHelper().submitEntity(bundles[0].getClusters().get(0));
-        AssertUtil.assertSucceeded(response);
-
-        feed = bundles[0].getInputFeedFromBundle();
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedule feed, suspend it. Check that web response reflects success and feed status is
-     * "suspended".
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void suspendScheduledFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().suspend(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-    }
-
-    /**
-     * Try to suspend a running feed twice. Response should reflect success,
-     * feed status should be suspended.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void suspendAlreadySuspendedFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().suspend(feed);
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-        response = prism.getFeedHelper().suspend(feed);
-
-        AssertUtil.assertSucceeded(response);
-        AssertUtil.checkStatus(clusterOC, EntityType.FEED, feed, Job.Status.SUSPENDED);
-    }
-
-    /**
-     * Remove feed. Attempt to suspend it should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void suspendDeletedFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitAndSchedule(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().delete(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().suspend(feed);
-        AssertUtil.assertFailed(response);
-    }
-
-    /**
-     * Attempt to suspend a non-existent feed should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void suspendNonExistentFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().suspend(feed);
-        AssertUtil.assertFailed(response);
-    }
-
-    /**
-     * Attempt to suspend a non-scheduled feed should fail.
-     *
-     * @throws Exception
-     */
-    @Test(groups = {"singleCluster"})
-    public void suspendSubmittedFeed() throws Exception {
-        ServiceResponse response = prism.getFeedHelper().submitEntity(feed);
-        AssertUtil.assertSucceeded(response);
-
-        response = prism.getFeedHelper().suspend(feed);
-        AssertUtil.assertFailed(response);
-    }
-}

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceParamTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceParamTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceParamTest.java
deleted file mode 100644
index 33808bf..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceParamTest.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.OozieClient;
-import org.apache.oozie.client.OozieClientException;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-/**
- * tests for instance option params.
- */
-@Test(groups = "embedded")
-public class InstanceParamTest extends BaseTestClass {
-
-    /**
-     * test cases for https://issues.apache.org/jira/browse/FALCON-263.
-     */
-
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestHDFSDir + "/testInputData" + MINUTE_DATE_PATTERN;
-    private String feedOutputPath = baseTestHDFSDir + "/testOutputData" + MINUTE_DATE_PATTERN;
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String startTime;
-    private String endTime;
-    private ColoHelper cluster1 = servers.get(0);
-    private OozieClient cluster1OC = serverOC.get(0);
-    private Bundle processBundle;
-    private static final Logger LOGGER = Logger.getLogger(InstanceParamTest.class);
-    private String processName;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-        startTime = TimeUtil.get20roundedTime(TimeUtil.getTimeWrtSystemTime(-20));
-        endTime = TimeUtil.getTimeWrtSystemTime(60);
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        processBundle = new Bundle(BundleUtil.readELBundle(), cluster1);
-        processBundle.generateUniqueBundle(this);
-        processBundle.setInputFeedDataPath(feedInputPath);
-        processBundle.setOutputFeedLocationData(feedOutputPath);
-        processBundle.setProcessWorkflow(aggregateWorkflowDir);
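-        // Prepare one bundle per server; the extra bundles' clusters are added later as additional source clusters.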
-        for (int i = 0; i < 3; i++) {
-            bundles[i] = new Bundle(BundleUtil.readELBundle(), servers.get(i));
-            bundles[i].generateUniqueBundle(this);
-            bundles[i].setInputFeedDataPath(feedInputPath);
-            bundles[i].setOutputFeedLocationData(feedOutputPath);
-            bundles[i].setProcessWorkflow(aggregateWorkflowDir);
-        }
-        processName = processBundle.getProcessName();
-    }
-
-    /**
-     * Schedule process. Get params of waiting instance.
-     */
-    @Test(timeOut = 1200000, enabled = false)
-    public void getParamsValidRequestInstanceWaiting()
-        throws URISyntaxException, JAXBException, AuthenticationException, IOException,
-        OozieClientException, InterruptedException {
-        processBundle.setProcessValidity(startTime, endTime);
-        processBundle.addClusterToBundle(bundles[1].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.addClusterToBundle(bundles[2].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, processBundle.getProcessData(), 0);
-        InstancesResult r = prism.getProcessHelper().getInstanceParams(processName,
-            "?start=" + startTime);
-        LOGGER.info(r.getMessage());
-    }
-
-    /**
-     * Schedule process. Wait till the instance succeeds. Get its params.
-     */
-    @Test(timeOut = 1200000, enabled = true)
-    public void getParamsValidRequestInstanceSucceeded()
-        throws URISyntaxException, JAXBException, AuthenticationException, IOException,
-        OozieClientException, InterruptedException {
-        processBundle.setProcessValidity(startTime, endTime);
-        processBundle.addClusterToBundle(bundles[1].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.addClusterToBundle(bundles[2].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, processBundle.getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster1, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(cluster1OC, processName, 1,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS, 10);
-        InstancesResult r = prism.getProcessHelper()
-            .getInstanceParams(processName, "?start=" + startTime);
-        LOGGER.info(r.getMessage());
-    }
-
-    /**
-     * Schedule process. Wait till the instance is killed. Get its params.
-     */
-    @Test(timeOut = 1200000, enabled = false)
-    public void getParamsValidRequestInstanceKilled()
-        throws URISyntaxException, JAXBException, AuthenticationException, IOException,
-        OozieClientException, InterruptedException {
-        processBundle.setProcessValidity(startTime, endTime);
-        processBundle.addClusterToBundle(bundles[1].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.addClusterToBundle(bundles[2].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(cluster1OC, processBundle.getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster1, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(cluster1OC, processName, 0,
-            CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS); //change according to test case
-        InstancesResult r = prism.getProcessHelper()
-            .getInstanceParams(processName, "?start=" + startTime);
-        LOGGER.info(r.getMessage());
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        removeTestClassEntities();
-        for (FileSystem fs : serverFS) {
-            HadoopUtil.deleteDirIfExists(Util.getPathPrefix(feedInputPath), fs);
-        }
-    }
-}
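
The params API exercised above can also be hit over raw HTTP. A minimal sketch, assuming an unsecured Falcon server at an illustrative localhost:15000 endpoint and illustrative process name and instance time; the /api/instance/params path is assumed to be the endpoint the getInstanceParams() helper wraps:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class InstanceParamsSketch {
        public static void main(String[] args) throws Exception {
            String falconUrl = "http://localhost:15000";           // assumed server endpoint
            String process = "sample-process";                      // illustrative entity name
            String start = "2016-03-01T09:00Z";                     // illustrative instance time
            URL url = new URL(falconUrl + "/api/instance/params/process/"
                    + process + "?start=" + start);
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestProperty("Accept", "application/json");
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(conn.getInputStream()))) {
                String line;
                while ((line = in.readLine()) != null) {
                    System.out.println(line);                        // instance params response
                }
            }
        }
    }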

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceSummaryTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceSummaryTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceSummaryTest.java
deleted file mode 100644
index 137491d..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/InstanceSummaryTest.java
+++ /dev/null
@@ -1,267 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.regression.Entities.FeedMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.feed.ActionType;
-import org.apache.falcon.entity.v0.feed.ClusterType;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.falcon.resource.InstancesSummaryResult;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.oozie.client.CoordinatorAction.Status;
-import org.apache.oozie.client.OozieClientException;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.text.ParseException;
-import java.util.List;
-
-/** This test currently provides minimal verification. More detailed tests should be added:
-    1. process : summary for a single cluster with a few instances, some in the future and some in the past
-    2. process : multiple clusters, fully in the past on one cluster, fully in the future on another,
-    and half future / half past on the third
-    3. feed : same as test 1 for a feed
-    4. feed : same as test 2 for a feed
- */
-@Test(groups = "embedded")
-public class InstanceSummaryTest extends BaseTestClass {
-
-    private String baseTestHDFSDir = cleanAndGetTestDir();
-    private String feedInputPath = baseTestHDFSDir + "/testInputData" + MINUTE_DATE_PATTERN;
-    private String aggregateWorkflowDir = baseTestHDFSDir + "/aggregator";
-    private String startTime;
-    private String endTime;
-    private ColoHelper cluster3 = servers.get(2);
-    private Bundle processBundle;
-    private String processName;
-
-    @BeforeClass(alwaysRun = true)
-    public void createTestData() throws Exception {
-        uploadDirToClusters(aggregateWorkflowDir, OSUtil.RESOURCES_OOZIE);
-        startTime = TimeUtil.get20roundedTime(TimeUtil.getTimeWrtSystemTime(-20));
-        endTime = TimeUtil.getTimeWrtSystemTime(60);
-        String startTimeData = TimeUtil.addMinsToTime(startTime, -100);
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startTimeData, endTime, 20);
-        for (FileSystem fs : serverFS) {
-            HadoopUtil.deleteDirIfExists(Util.getPathPrefix(feedInputPath), fs);
-            HadoopUtil.flattenAndPutDataInFolder(fs, OSUtil.NORMAL_INPUT,
-                Util.getPathPrefix(feedInputPath), dataDates);
-        }
-    }
-
-    @BeforeMethod(alwaysRun = true)
-    public void setup() throws Exception {
-        processBundle = new Bundle(BundleUtil.readELBundle(), cluster3);
-        processBundle.generateUniqueBundle(this);
-        processBundle.setInputFeedDataPath(feedInputPath);
-        processBundle.setOutputFeedLocationData(baseTestHDFSDir + "/output" + MINUTE_DATE_PATTERN);
-        processBundle.setProcessWorkflow(aggregateWorkflowDir);
-
-        for (int i = 0; i < 3; i++) {
-            bundles[i] = new Bundle(BundleUtil.readELBundle(), servers.get(i));
-            bundles[i].generateUniqueBundle(this);
-            bundles[i].setProcessWorkflow(aggregateWorkflowDir);
-        }
-        processName = Util.readEntityName(processBundle.getProcessData());
-    }
-
-    /**
-     *  Schedule single-cluster process. Get its instances summary.
-     */
-    @Test(enabled = true, timeOut = 1200000)
-    public void testSummarySingleClusterProcess()
-        throws URISyntaxException, JAXBException, IOException, ParseException,
-        OozieClientException, AuthenticationException, InterruptedException {
-        processBundle.setProcessValidity(startTime, endTime);
-        processBundle.submitFeedsScheduleProcess(prism);
-        InstanceUtil.waitTillInstancesAreCreated(serverOC.get(2), processBundle.getProcessData(), 0);
-
-        // start only at start time
-        InstancesSummaryResult r = prism.getProcessHelper()
-            .getInstanceSummary(processName, "?start=" + startTime);
-        InstanceUtil.waitTillInstanceReachState(serverOC.get(2), processName, 2,
-            Status.SUCCEEDED, EntityType.PROCESS);
-
-        //AssertUtil.assertSucceeded(r);
-
-        //start only before process start
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, -100));
-        //AssertUtil.assertFailed(r,"response should have failed");
-
-        //start only after process end
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, 120));
-
-
-        //start only at mid specific instance
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, 10));
-
-        //start only in between 2 instance
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, 7));
-
-        //start and end at start and end
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + startTime + "&end=" + endTime);
-
-        //start in between and end at end
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + TimeUtil.addMinsToTime(startTime, 14) + "&end=" + endTime);
-
-        //start at start and end between
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + startTime + "&end=" + TimeUtil.addMinsToTime(endTime, -20));
-
-        // start and end in between
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, 20)
-                    + "&end=" + TimeUtil.addMinsToTime(endTime, -13));
-
-        //start before start with end in between
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, -100)
-                    + "&end=" + TimeUtil.addMinsToTime(endTime, -37));
-
-        //start in between and end after end
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?start=" + TimeUtil.addMinsToTime(startTime, 60)
-                    + "&end=" + TimeUtil.addMinsToTime(endTime, 100));
-
-        // both start and end out of range
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + TimeUtil.addMinsToTime(startTime, -100)
-                + "&end=" + TimeUtil.addMinsToTime(endTime, 100));
-
-        // end only
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-                "?end=" + TimeUtil.addMinsToTime(endTime, -30));
-    }
-
-    /**
-     * Adjust multi-cluster process. Submit and schedule it. Get its instances summary.
-     */
-    @Test(enabled = true, timeOut = 1200000)
-    public void testSummaryMultiClusterProcess() throws JAXBException,
-            ParseException, IOException, URISyntaxException, AuthenticationException,
-            InterruptedException {
-        processBundle.setProcessValidity(startTime, endTime);
-        processBundle.addClusterToBundle(bundles[1].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.addClusterToBundle(bundles[2].getClusters().get(0),
-            ClusterType.SOURCE, null, null);
-        processBundle.submitFeedsScheduleProcess(prism);
-        InstancesSummaryResult r = prism.getProcessHelper()
-            .getInstanceSummary(processName, "?start=" + startTime);
-
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-             "?start=" + startTime + "&end=" + endTime);
-
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-             "?start=" + startTime + "&end=" + endTime);
-
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + startTime + "&end=" + endTime);
-
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + startTime + "&end=" + endTime);
-
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + startTime + "&end=" + endTime);
-
-        r = prism.getProcessHelper().getInstanceSummary(processName,
-            "?start=" + startTime + "&end=" + endTime);
-    }
-
-    /**
-     *  Adjust multi-cluster feed. Submit and schedule it. Get its instances summary.
-     */
-    @Test(enabled = true, timeOut = 1200000)
-    public void testSummaryMultiClusterFeed() throws JAXBException, ParseException, IOException,
-            URISyntaxException, OozieClientException, AuthenticationException,
-            InterruptedException {
-
-        //create desired feed
-        String feed = bundles[0].getDataSets().get(0);
-
-        //cluster_1 is target, cluster_2 is source and cluster_3 is neutral
-        feed = FeedMerlin.fromString(feed).clearFeedClusters().toString();
-
-        feed = FeedMerlin.fromString(feed).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[2].getClusters().get(0)))
-                .withRetention("days(100000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-10-01T12:10Z")
-                .build()).toString();
-
-        feed = FeedMerlin.fromString(feed).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[0].getClusters().get(0)))
-                .withRetention("days(100000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-10-01T12:25Z")
-                .withClusterType(ClusterType.TARGET)
-                .withDataLocation(feedInputPath)
-                .build()).toString();
-
-        feed = FeedMerlin.fromString(feed).addFeedCluster(
-            new FeedMerlin.FeedClusterBuilder(Util.readEntityName(bundles[1].getClusters().get(0)))
-                .withRetention("days(100000)", ActionType.DELETE)
-                .withValidity(startTime, "2099-01-01T00:00Z")
-                .withClusterType(ClusterType.SOURCE)
-                .withDataLocation(feedInputPath)
-                .build()).toString();
-
-        //submit clusters
-        Bundle.submitCluster(bundles[0], bundles[1], bundles[2]);
-
-        //create test data on cluster_2
-      /*InstanceUtil.createDataWithinDatesAndPrefix(cluster2,
-        InstanceUtil.oozieDateToDate(startTime),
-        InstanceUtil.oozieDateToDate(InstanceUtil.getTimeWrtSystemTime(60)),
-        feedInputPath, 1);*/
-
-        //submit and schedule feed
-        prism.getFeedHelper().submitAndSchedule(feed);
-
-        InstancesSummaryResult r = prism.getFeedHelper()
-            .getInstanceSummary(Util.readEntityName(feed), "?start=" + startTime);
-
-        r = prism.getFeedHelper().getInstanceSummary(Util.readEntityName(feed),
-            "?start=" + startTime + "&end=" + TimeUtil.addMinsToTime(endTime, -20));
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() throws IOException {
-        removeTestClassEntities();
-    }
-}
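
The windows probed in testSummarySingleClusterProcess() are just the process validity shifted by a few minutes in either direction. A small self-contained sketch of the same query-string construction using java.time instead of the test's TimeUtil helper; the times and offsets are illustrative:

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    public class SummaryWindowSketch {
        // Falcon-style times, e.g. 2016-03-01T09:00Z (UTC, minute precision).
        private static final DateTimeFormatter FMT =
                DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm'Z'").withZone(ZoneOffset.UTC);

        static String plusMinutes(String time, int mins) {
            return FMT.format(ZonedDateTime.parse(time, FMT).plusMinutes(mins));
        }

        public static void main(String[] args) {
            String start = "2016-03-01T09:00Z";   // illustrative validity start
            String end   = "2016-03-01T10:20Z";   // illustrative validity end
            System.out.println("?start=" + start);                                  // only at start
            System.out.println("?start=" + plusMinutes(start, -100));               // before validity
            System.out.println("?start=" + start + "&end=" + end);                  // full window
            System.out.println("?start=" + plusMinutes(start, 20)
                    + "&end=" + plusMinutes(end, -13));                             // both in between
        }
    }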

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/LogMoverTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/LogMoverTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/LogMoverTest.java
deleted file mode 100644
index f936305..0000000
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/LogMoverTest.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.regression;
-
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.Frequency.TimeUnit;
-import org.apache.falcon.regression.Entities.ProcessMerlin;
-import org.apache.falcon.regression.core.bundle.Bundle;
-import org.apache.falcon.regression.core.helpers.ColoHelper;
-import org.apache.falcon.regression.core.util.AssertUtil;
-import org.apache.falcon.regression.core.util.BundleUtil;
-import org.apache.falcon.regression.core.util.HadoopUtil;
-import org.apache.falcon.regression.core.util.InstanceUtil;
-import org.apache.falcon.regression.core.util.OSUtil;
-import org.apache.falcon.regression.core.util.OozieUtil;
-import org.apache.falcon.regression.core.util.TimeUtil;
-import org.apache.falcon.regression.core.util.Util;
-import org.apache.falcon.regression.testHelper.BaseTestClass;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.log4j.Logger;
-import org.apache.oozie.client.CoordinatorAction;
-import org.apache.oozie.client.Job;
-import org.apache.oozie.client.OozieClient;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-/**
- * LogMover Test.
- * Adds job launcher success/failure logs to the falcon staging directory.
- * It does not work for map-reduce actions (FALCON-1038),
- * so a pig action is used to test this feature.
- */
-@Test(groups = "embedded")
-public class LogMoverTest extends BaseTestClass {
-
-    private ColoHelper cluster = servers.get(0);
-    private FileSystem clusterFS = serverFS.get(0);
-    private OozieClient clusterOC = serverOC.get(0);
-    private String pigTestDir = cleanAndGetTestDir();
-    private String aggregateWorkflowDir = cleanAndGetTestDir() + "/aggregator";
-    private String inputPath = pigTestDir + "/input" + MINUTE_DATE_PATTERN;
-    private String propPath = pigTestDir + "/LogMover";
-    private static final Logger LOGGER = Logger.getLogger(LogMoverTest.class);
-    private String processName;
-    private String process;
-    private String startDate;
-    private String endDate;
-
-    @BeforeMethod(alwaysRun = true)
-    public void setUp() throws Exception {
-        LOGGER.info("in @BeforeMethod");
-        startDate = TimeUtil.getTimeWrtSystemTime(-3);
-        endDate = TimeUtil.getTimeWrtSystemTime(3);
-
-        LOGGER.info("startDate : " + startDate + " , endDate : " + endDate);
-        //copy pig script and workflow
-        HadoopUtil.uploadDir(clusterFS, aggregateWorkflowDir, OSUtil.concat(OSUtil.RESOURCES, "LogMover"));
-        Bundle bundle = BundleUtil.readELBundle();
-        bundles[0] = new Bundle(bundle, cluster);
-        bundles[0].generateUniqueBundle(this);
-        bundles[0].setInputFeedDataPath(inputPath);
-        bundles[0].setInputFeedPeriodicity(1, TimeUnit.minutes);
-        bundles[0].setOutputFeedLocationData(pigTestDir + "/output-data" + MINUTE_DATE_PATTERN);
-        bundles[0].setOutputFeedAvailabilityFlag("_SUCCESS");
-        bundles[0].setProcessWorkflow(aggregateWorkflowDir);
-        bundles[0].setProcessInputNames("INPUT");
-        bundles[0].setProcessOutputNames("OUTPUT");
-        bundles[0].setProcessValidity(startDate, endDate);
-        bundles[0].setProcessPeriodicity(1, TimeUnit.minutes);
-        bundles[0].setOutputFeedPeriodicity(1, TimeUnit.minutes);
-
-        List<String> dataDates = TimeUtil.getMinuteDatesOnEitherSide(startDate, endDate, 20);
-        HadoopUtil.flattenAndPutDataInFolder(clusterFS, OSUtil.NORMAL_INPUT,
-                bundles[0].getFeedDataPathPrefix(), dataDates);
-
-        // Defining path to be used in pig script
-        final ProcessMerlin processElement = bundles[0].getProcessObject();
-        processElement.clearProperties().withProperty("inputPath", propPath);
-        bundles[0].setProcessData(processElement.toString());
-        process = bundles[0].getProcessData();
-        processName = Util.readEntityName(process);
-
-    }
-
-    @AfterMethod(alwaysRun = true)
-    public void tearDown() {
-        removeTestClassEntities();
-    }
-
-    /**
-     * Schedule a process. It should succeed, and job launcher success information
-     * should be present in the falcon staging directory.
-     */
-    @Test(groups = {"singleCluster"})
-    public void logMoverSucceedTest() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-
-        //Copy data to let pig job succeed
-        HadoopUtil.copyDataToFolder(clusterFS, propPath, OSUtil.concat(OSUtil.RESOURCES, "pig"));
-
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 1,
-                CoordinatorAction.Status.SUCCEEDED, EntityType.PROCESS);
-
-        AssertUtil.assertLogMoverPath(true, processName, clusterFS, "process", "Success logs are not present");
-    }
-
-    /**
-     * Schedule a process. It should fail, and job launcher failure information
-     * should be present in the falcon staging directory.
-     */
-    @Test(groups = {"singleCluster"})
-    public void logMoverFailTest() throws Exception {
-        bundles[0].submitFeedsScheduleProcess(prism);
-        AssertUtil.checkStatus(clusterOC, EntityType.PROCESS, process, Job.Status.RUNNING);
-
-        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
-        OozieUtil.createMissingDependencies(cluster, EntityType.PROCESS, processName, 0);
-        InstanceUtil.waitTillInstanceReachState(clusterOC, bundles[0].getProcessName(), 1,
-                        CoordinatorAction.Status.KILLED, EntityType.PROCESS);
-
-        AssertUtil.assertLogMoverPath(false, processName, clusterFS, "process", "Failed logs are not present");
-    }
-
-}
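
The assertion at the end of each test boils down to listing the launcher logs that the log mover copies into the staging area. A rough sketch with the Hadoop FileSystem API; the staging path layout shown here is an assumption for illustration, and the exact path checked by assertLogMoverPath() may differ:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class StagingLogListingSketch {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            // Illustrative staging layout for a process entity's launcher logs.
            Path logs = new Path("/apps/falcon/staging/falcon/workflows/process/sample-process/logs");
            for (FileStatus status : fs.listStatus(logs)) {
                System.out.println(status.getPath());   // launcher success/failure log files
            }
        }
    }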


[48/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/Tag.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/Tag.java b/client/src/main/java/org/apache/falcon/Tag.java
deleted file mode 100644
index a8d60b6..0000000
--- a/client/src/main/java/org/apache/falcon/Tag.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon;
-
-import org.apache.falcon.entity.v0.EntityType;
-
-/**
- * Tag to include in the entity type.
- */
-public enum Tag {
-    DEFAULT(EntityType.PROCESS), RETENTION(EntityType.FEED), REPLICATION(EntityType.FEED),
-        IMPORT(EntityType.FEED), EXPORT(EntityType.FEED);
-
-    private final EntityType entityType;
-
-    private Tag(EntityType entityType) {
-        this.entityType = entityType;
-    }
-
-    public EntityType getType() {
-        return entityType;
-    }
-}
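
Callers of this enum typically just map a workflow tag back to its entity type. A small sketch, assuming the Falcon client classes are on the classpath:

    import org.apache.falcon.Tag;
    import org.apache.falcon.entity.v0.EntityType;

    public class TagSketch {
        public static void main(String[] args) {
            for (Tag tag : Tag.values()) {
                EntityType type = tag.getType();
                System.out.println(tag + " -> " + type);   // e.g. RETENTION -> FEED
            }
        }
    }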

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/CLIParser.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/CLIParser.java b/client/src/main/java/org/apache/falcon/cli/CLIParser.java
deleted file mode 100644
index 9d8554f..0000000
--- a/client/src/main/java/org/apache/falcon/cli/CLIParser.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.HelpFormatter;
-
-import java.io.PrintWriter;
-import java.text.MessageFormat;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-
-/**
- * Cannibalized from the Oozie CLIParser into Falcon.
- * Command line parser based on Apache commons-cli 1.x that supports subcommands.
- */
-public class CLIParser {
-    private static final String LEFT_PADDING = "      ";
-
-    private String cliName;
-    private String[] cliHelp;
-    private Map<String, Options> commands = new LinkedHashMap<String, Options>();
-    private Map<String, Boolean> commandWithArgs = new LinkedHashMap<String, Boolean>();
-    private Map<String, String> commandsHelp = new LinkedHashMap<String, String>();
-
-    /**
-     * Create a parser.
-     *
-     * @param aCliName name of the parser, for help purposes.
-     * @param aCliHelp help for the CLI.
-     */
-    public CLIParser(String aCliName, String[] aCliHelp) {
-        this.cliName = aCliName;
-        this.cliHelp = aCliHelp.clone();
-    }
-
-    /**
-     * Add a command to the parser.
-     *
-     * @param command        command name.
-     * @param argsHelp       command arguments help.
-     * @param commandHelp    command description.
-     * @param commandOptions command options.
-     * @param hasArguments   has args
-     */
-    public void addCommand(String command, String argsHelp, String commandHelp, Options commandOptions,
-                           boolean hasArguments) {
-        String helpMsg = argsHelp + ((hasArguments) ? "<ARGS> " : "") + ": " + commandHelp;
-        commandsHelp.put(command, helpMsg);
-        commands.put(command, commandOptions);
-        commandWithArgs.put(command, hasArguments);
-    }
-
-    /**
-     * Bean that represents a parsed command.
-     */
-    public static final class Command {
-        private String name;
-        private CommandLine commandLine;
-
-        private Command(String name, CommandLine commandLine) {
-            this.name = name;
-            this.commandLine = commandLine;
-        }
-
-        /**
-         * Return the command name.
-         *
-         * @return the command name.
-         */
-        public String getName() {
-            return name;
-        }
-
-        /**
-         * Return the command line.
-         *
-         * @return the command line.
-         */
-        public CommandLine getCommandLine() {
-            return commandLine;
-        }
-    }
-
-    /**
-     * Parse an array of arguments into a command.
-     *
-     * @param args array of arguments.
-     * @return the parsed Command.
-     * @throws ParseException thrown if the arguments could not be parsed.
-     */
-    public Command parse(String[] args) throws ParseException {
-        if (args.length == 0) {
-            throw new ParseException("missing sub-command");
-        } else {
-            if (commands.containsKey(args[0])) {
-                GnuParser parser = new GnuParser();
-                String[] minusCommand = new String[args.length - 1];
-                System.arraycopy(args, 1, minusCommand, 0, minusCommand.length);
-                return new Command(args[0], parser.parse(commands.get(args[0]), minusCommand,
-                        commandWithArgs.get(args[0])));
-            } else {
-                throw new ParseException(MessageFormat.format("invalid sub-command [{0}]", args[0]));
-            }
-        }
-    }
-
-    public String shortHelp() {
-        return "use 'help' sub-command for help details";
-    }
-
-    /**
-     * Print the help for the parser to standard output.
-     */
-    public void showHelp() {
-        PrintWriter pw = new PrintWriter(System.out);
-        pw.println("usage: ");
-        for (String s : cliHelp) {
-            pw.println(LEFT_PADDING + s);
-        }
-        pw.println();
-        HelpFormatter formatter = new HelpFormatter();
-        for (Map.Entry<String, Options> entry : commands.entrySet()) {
-            String s = LEFT_PADDING + cliName + " " + entry.getKey() + " ";
-            if (entry.getValue().getOptions().size() > 0) {
-                pw.println(s + "<OPTIONS> " + commandsHelp.get(entry.getKey()));
-                formatter.printOptions(pw, 100, entry.getValue(), s.length(), 3);
-            } else {
-                pw.println(s + commandsHelp.get(entry.getKey()));
-            }
-            pw.println();
-        }
-        pw.flush();
-    }
-}
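
A small sketch of how a sub-command is registered and parsed with this class, assuming the Falcon client jar and commons-cli are on the classpath; the command and option names here are illustrative:

    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;
    import org.apache.falcon.cli.CLIParser;

    public class CLIParserSketch {
        public static void main(String[] args) throws ParseException {
            CLIParser parser = new CLIParser("falcon", new String[]{"falcon <command> <options>"});

            // Register an illustrative "status" sub-command with a -url option.
            Options statusOptions = new Options();
            statusOptions.addOption(new Option("url", true, "Falcon URL"));
            parser.addCommand("status", "", "show entity status", statusOptions, false);

            CLIParser.Command cmd =
                    parser.parse(new String[]{"status", "-url", "http://localhost:15000"});
            System.out.println(cmd.getName());                               // status
            System.out.println(cmd.getCommandLine().getOptionValue("url"));  // http://localhost:15000
        }
    }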

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java b/client/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java
deleted file mode 100644
index 6360743..0000000
--- a/client/src/main/java/org/apache/falcon/cli/FalconAdminCLI.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.Options;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Admin extension to Falcon Command Line Interface - wraps the RESTful API for admin commands.
- */
-public class FalconAdminCLI extends FalconCLI {
-
-    private static final String STACK_OPTION = "stack";
-
-    public FalconAdminCLI() throws Exception {
-        super();
-    }
-
-    public Options createAdminOptions() {
-        Options adminOptions = new Options();
-        Option url = new Option(URL_OPTION, true, "Falcon URL");
-        adminOptions.addOption(url);
-
-        OptionGroup group = new OptionGroup();
-        Option status = new Option(STATUS_OPT, false,
-                "show the current system status");
-        Option version = new Option(VERSION_OPT, false,
-                "show Falcon server build version");
-        Option stack = new Option(STACK_OPTION, false,
-                "show the thread stack dump");
-        Option doAs = new Option(DO_AS_OPT, true,
-                "doAs user");
-        Option help = new Option("help", false, "show Falcon help");
-        Option debug = new Option(DEBUG_OPTION, false, "Use debug mode to see debugging statements on stdout");
-        group.addOption(status);
-        group.addOption(version);
-        group.addOption(stack);
-        group.addOption(help);
-
-        adminOptions.addOptionGroup(group);
-        adminOptions.addOption(doAs);
-        adminOptions.addOption(debug);
-        return adminOptions;
-    }
-
-    public int adminCommand(CommandLine commandLine, FalconClient client,
-                             String falconUrl) throws FalconCLIException, IOException {
-        String result;
-        Set<String> optionsList = new HashSet<String>();
-        for (Option option : commandLine.getOptions()) {
-            optionsList.add(option.getOpt());
-        }
-
-        String doAsUser = commandLine.getOptionValue(DO_AS_OPT);
-
-        if (optionsList.contains(STACK_OPTION)) {
-            result = client.getThreadDump(doAsUser);
-            OUT.get().println(result);
-        }
-
-        int exitValue = 0;
-        if (optionsList.contains(STATUS_OPT)) {
-            try {
-                int status = client.getStatus(doAsUser);
-                if (status != 200) {
-                    ERR.get().println("Falcon server is not fully operational (on " + falconUrl + "). "
-                            + "Please check log files.");
-                    exitValue = status;
-                } else {
-                    OUT.get().println("Falcon server is running (on " + falconUrl + ")");
-                }
-            } catch (Exception e) {
-                ERR.get().println("Falcon server doesn't seem to be running on " + falconUrl);
-                exitValue = -1;
-            }
-        } else if (optionsList.contains(VERSION_OPT)) {
-            result = client.getVersion(doAsUser);
-            OUT.get().println("Falcon server build version: " + result);
-        } else if (optionsList.contains(HELP_CMD)) {
-            OUT.get().println("Falcon Help");
-        }
-        return exitValue;
-    }
-
-}
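
The option set built by createAdminOptions() can be parsed on its own with commons-cli. A minimal sketch with illustrative arguments, assuming the Falcon client classes are on the classpath:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.falcon.cli.FalconAdminCLI;

    public class AdminOptionsSketch {
        public static void main(String[] args) throws Exception {
            FalconAdminCLI adminCLI = new FalconAdminCLI();
            CommandLine cmd = new GnuParser().parse(
                    adminCLI.createAdminOptions(),
                    new String[]{"-url", "http://localhost:15000", "-version"});
            System.out.println(cmd.hasOption("version"));   // true
            System.out.println(cmd.getOptionValue("url"));  // http://localhost:15000
        }
    }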

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/FalconCLI.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/FalconCLI.java b/client/src/main/java/org/apache/falcon/cli/FalconCLI.java
deleted file mode 100644
index a1f42ce..0000000
--- a/client/src/main/java/org/apache/falcon/cli/FalconCLI.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import com.sun.jersey.api.client.ClientHandlerException;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesSummaryResult;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintStream;
-import java.util.Arrays;
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Falcon Command Line Interface - wraps the RESTful API.
- */
-public class FalconCLI {
-
-    public static final AtomicReference<PrintStream> ERR = new AtomicReference<PrintStream>(System.err);
-    public static final AtomicReference<PrintStream> OUT = new AtomicReference<PrintStream>(System.out);
-
-    public static final String ENV_FALCON_DEBUG = "FALCON_DEBUG";
-    public static final String DEBUG_OPTION = "debug";
-    public static final String URL_OPTION = "url";
-    private static final String FALCON_URL = "FALCON_URL";
-
-    public static final String ADMIN_CMD = "admin";
-    public static final String HELP_CMD = "help";
-    public static final String METADATA_CMD = "metadata";
-    public static final String ENTITY_CMD = "entity";
-    public static final String INSTANCE_CMD = "instance";
-    public static final String RECIPE_CMD = "recipe";
-
-    public static final String TYPE_OPT = "type";
-    public static final String COLO_OPT = "colo";
-    public static final String CLUSTER_OPT = "cluster";
-    public static final String FEED_OPT = "feed";
-    public static final String PROCESS_OPT = "process";
-    public static final String ENTITY_NAME_OPT = "name";
-    public static final String FILE_PATH_OPT = "file";
-    public static final String VERSION_OPT = "version";
-    public static final String SCHEDULE_OPT = "schedule";
-    public static final String SUSPEND_OPT = "suspend";
-    public static final String RESUME_OPT = "resume";
-    public static final String STATUS_OPT = "status";
-    public static final String SUMMARY_OPT = "summary";
-    public static final String DEPENDENCY_OPT = "dependency";
-    public static final String LIST_OPT = "list";
-    public static final String SKIPDRYRUN_OPT = "skipDryRun";
-    public static final String FILTER_BY_OPT = "filterBy";
-    public static final String ORDER_BY_OPT = "orderBy";
-    public static final String SORT_ORDER_OPT = "sortOrder";
-    public static final String OFFSET_OPT = "offset";
-    public static final String NUM_RESULTS_OPT = "numResults";
-    public static final String START_OPT = "start";
-    public static final String END_OPT = "end";
-    public static final String CURRENT_COLO = "current.colo";
-    public static final String CLIENT_PROPERTIES = "/client.properties";
-    public static final String DO_AS_OPT = "doAs";
-
-    private final Properties clientProperties;
-
-    public FalconCLI() throws Exception {
-        clientProperties = getClientProperties();
-    }
-
-    /**
-     * Recipe operation enum.
-     */
-    public enum RecipeOperation {
-        HDFS_REPLICATION,
-        HIVE_DISASTER_RECOVERY
-    }
-
-    /**
-     * Entry point for the Falcon CLI when invoked from the command line. Upon
-     * completion this method exits the JVM with '0' (success) or '-1'
-     * (failure).
-     *
-     * @param args options and arguments for the Falcon CLI.
-     */
-    public static void main(final String[] args) throws Exception {
-        System.exit(new FalconCLI().run(args));
-    }
-
-    // TODO help and headers
-    private static final String[] FALCON_HELP = { "the env variable '" + FALCON_URL
-                                                          + "' is used as default value for the '-" + URL_OPTION
-                                                          + "' option",
-                                                  "custom headers for Falcon web services can be specified using '-D"
-                                                          + FalconClient.WS_HEADER_PREFIX + "NAME=VALUE'", };
-    /**
-     * Run a CLI programmatically.
-     * <p/>
-     * It does not exit the JVM.
-     * <p/>
-     * A CLI instance can be used only once.
-     *
-     * @param args options and arguments for the Falcon CLI.
-     * @return '0' (success), '-1' (failure).
-     */
-    public synchronized int run(final String[] args) throws Exception {
-
-        CLIParser parser = new CLIParser("falcon", FALCON_HELP);
-
-        FalconAdminCLI adminCLI = new FalconAdminCLI();
-        FalconEntityCLI entityCLI = new FalconEntityCLI();
-        FalconInstanceCLI instanceCLI = new FalconInstanceCLI();
-        FalconMetadataCLI metadataCLI = new FalconMetadataCLI();
-        FalconRecipeCLI recipeCLI = new FalconRecipeCLI();
-
-        parser.addCommand(ADMIN_CMD, "", "admin operations", adminCLI.createAdminOptions(), true);
-        parser.addCommand(HELP_CMD, "", "display usage", new Options(), false);
-        parser.addCommand(ENTITY_CMD, "",
-                "Entity operations like submit, suspend, resume, delete, status, definition, submitAndSchedule",
-                entityCLI.createEntityOptions(), false);
-        parser.addCommand(INSTANCE_CMD, "",
-                "Process instances operations like running, status, kill, suspend, resume, rerun, logs",
-                instanceCLI.createInstanceOptions(), false);
-        parser.addCommand(METADATA_CMD, "", "Metadata operations like list, relations",
-                metadataCLI.createMetadataOptions(), true);
-        parser.addCommand(RECIPE_CMD, "", "recipe operations", recipeCLI.createRecipeOptions(), true);
-        parser.addCommand(VERSION_OPT, "", "show client version", new Options(), false);
-
-        try {
-            CLIParser.Command command = parser.parse(args);
-            int exitValue = 0;
-            if (command.getName().equals(HELP_CMD)) {
-                parser.showHelp();
-            } else {
-                CommandLine commandLine = command.getCommandLine();
-                String falconUrl = getFalconEndpoint(commandLine);
-                FalconClient client = new FalconClient(falconUrl, clientProperties);
-
-                setDebugMode(client, commandLine.hasOption(DEBUG_OPTION));
-                if (command.getName().equals(ADMIN_CMD)) {
-                    exitValue = adminCLI.adminCommand(commandLine, client, falconUrl);
-                } else if (command.getName().equals(ENTITY_CMD)) {
-                    entityCLI.entityCommand(commandLine, client);
-                } else if (command.getName().equals(INSTANCE_CMD)) {
-                    instanceCLI.instanceCommand(commandLine, client);
-                } else if (command.getName().equals(METADATA_CMD)) {
-                    metadataCLI.metadataCommand(commandLine, client);
-                } else if (command.getName().equals(RECIPE_CMD)) {
-                    recipeCLI.recipeCommand(commandLine, client);
-                }
-            }
-            return exitValue;
-        } catch (ParseException ex) {
-            ERR.get().println("Invalid sub-command: " + ex.getMessage());
-            ERR.get().println();
-            ERR.get().println(parser.shortHelp());
-            ERR.get().println("Stacktrace:");
-            ex.printStackTrace();
-            return -1;
-        } catch (ClientHandlerException ex) {
-            ERR.get().print("Unable to connect to Falcon server, "
-                    + "please check if the URL is correct and Falcon server is up and running\n");
-            ERR.get().println("Stacktrace:");
-            ex.printStackTrace();
-            return -1;
-        } catch (FalconCLIException e) {
-            ERR.get().println("ERROR: " + e.getMessage());
-            return -1;
-        } catch (Exception ex) {
-            ERR.get().println("Stacktrace:");
-            ex.printStackTrace();
-            return -1;
-        }
-    }
-
-    protected Integer parseIntegerInput(String optionValue, Integer defaultVal, String optionName)
-        throws FalconCLIException {
-        Integer integer = defaultVal;
-        if (optionValue != null) {
-            try {
-                return Integer.parseInt(optionValue);
-            } catch (NumberFormatException e) {
-                throw new FalconCLIException("Input value provided for queryParam \""+ optionName
-                        +"\" is not a valid Integer");
-            }
-        }
-        return integer;
-    }
-
-    protected void validateEntityTypeForSummary(String type) throws FalconCLIException {
-        EntityType entityType = EntityType.getEnum(type);
-        if (!entityType.isSchedulable()) {
-            throw new FalconCLIException("Invalid entity type " + entityType
-                    + " for EntitySummary API. Valid options are feed or process");
-        }
-    }
-
-    protected void validateNotEmpty(String paramVal, String paramName) throws FalconCLIException {
-        if (StringUtils.isBlank(paramVal)) {
-            throw new FalconCLIException("Missing argument : " + paramName);
-        }
-    }
-
-    protected void validateSortOrder(String sortOrder) throws FalconCLIException {
-        if (!StringUtils.isBlank(sortOrder)) {
-            if (!sortOrder.equalsIgnoreCase("asc") && !sortOrder.equalsIgnoreCase("desc")) {
-                throw new FalconCLIException("Value for param sortOrder should be \"asc\" or \"desc\". It is  : "
-                        + sortOrder);
-            }
-        }
-    }
-
-    protected String getColo(String colo) throws FalconCLIException, IOException {
-        if (colo == null) {
-            Properties prop = getClientProperties();
-            colo = prop.getProperty(CURRENT_COLO, "*");
-        }
-        return colo;
-    }
-
-    protected void validateFilterBy(String filterBy, String filterType) throws FalconCLIException {
-        if (StringUtils.isEmpty(filterBy)) {
-            return;
-        }
-        String[] filterSplits = filterBy.split(",");
-        for (String s : filterSplits) {
-            String[] tempKeyVal = s.split(":", 2);
-            try {
-                if (filterType.equals("entity")) {
-                    EntityList.EntityFilterByFields.valueOf(tempKeyVal[0].toUpperCase());
-                } else if (filterType.equals("instance")) {
-                    InstancesResult.InstanceFilterFields.valueOf(tempKeyVal[0].toUpperCase());
-                }else if (filterType.equals("summary")) {
-                    InstancesSummaryResult.InstanceSummaryFilterFields.valueOf(tempKeyVal[0].toUpperCase());
-                } else {
-                    throw new IllegalArgumentException("Invalid API call: filterType is not valid");
-                }
-            } catch (IllegalArgumentException ie) {
-                throw new FalconCLIException("Invalid filterBy argument : " + tempKeyVal[0] + " in : " + s);
-            }
-        }
-    }
-
-    protected void validateOrderBy(String orderBy, String action) throws FalconCLIException {
-        if (StringUtils.isBlank(orderBy)) {
-            return;
-        }
-        if (action.equals("instance")) {
-            if (Arrays.asList(new String[]{"status", "cluster", "starttime", "endtime"})
-                .contains(orderBy.toLowerCase())) {
-                return;
-            }
-        } else if (action.equals("entity")) {
-            if (Arrays.asList(new String[] {"type", "name"}).contains(orderBy.toLowerCase())) {
-                return;
-            }
-        } else if (action.equals("summary")) {
-            if (Arrays.asList(new String[]{"cluster"})
-                    .contains(orderBy.toLowerCase())) {
-                return;
-            }
-        }
-        throw new FalconCLIException("Invalid orderBy argument : " + orderBy);
-    }
-
-    protected String getFalconEndpoint(CommandLine commandLine) throws FalconCLIException, IOException {
-        String url = commandLine.getOptionValue(URL_OPTION);
-        if (url == null) {
-            url = System.getenv(FALCON_URL);
-        }
-        if (url == null) {
-            if (clientProperties.containsKey("falcon.url")) {
-                url = clientProperties.getProperty("falcon.url");
-            }
-        }
-        if (url == null) {
-            throw new FalconCLIException("Failed to get falcon url from cmdline, or environment or client properties");
-        }
-
-        return url;
-    }
-
-    private void setDebugMode(FalconClient client, boolean debugOpt) {
-        String debug = System.getenv(ENV_FALCON_DEBUG);
-        if (debugOpt) {  // CLI argument "-debug" used
-            client.setDebugMode(true);
-        } else if (StringUtils.isNotBlank(debug)) {
-            System.out.println(ENV_FALCON_DEBUG + ": " + debug);
-            if (debug.trim().toLowerCase().equals("true")) {
-                client.setDebugMode(true);
-            }
-        }
-    }
-
-    private Properties getClientProperties() throws IOException {
-        InputStream inputStream = null;
-        try {
-            inputStream = FalconCLI.class.getResourceAsStream(CLIENT_PROPERTIES);
-            Properties prop = new Properties();
-            if (inputStream != null) {
-                prop.load(inputStream);
-            }
-            return prop;
-        } finally {
-            IOUtils.closeQuietly(inputStream);
-        }
-    }
-}
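
run() can also be driven programmatically, which is how tests and wrappers typically invoke it. A minimal sketch, assuming a Falcon server is reachable at the illustrative URL; run() returns 0 on success and -1 on failure:

    import org.apache.falcon.cli.FalconCLI;

    public class FalconCLIRunSketch {
        public static void main(String[] args) throws Exception {
            int exit = new FalconCLI().run(new String[]{
                "admin", "-url", "http://localhost:15000", "-version"
            });
            System.out.println("exit code: " + exit);
        }
    }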

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java b/client/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java
deleted file mode 100644
index 6f9d620..0000000
--- a/client/src/main/java/org/apache/falcon/cli/FalconEntityCLI.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.Options;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.ResponseHelper;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.entity.v0.SchemaHelper;
-import org.apache.falcon.resource.EntityList;
-import org.apache.falcon.resource.FeedLookupResult;
-import org.apache.falcon.resource.SchedulableEntityInstanceResult;
-
-import java.io.IOException;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Entity extension to Falcon Command Line Interface - wraps the RESTful API for entities.
- */
-public class FalconEntityCLI extends FalconCLI {
-
-    private static final String SUBMIT_OPT = "submit";
-    private static final String UPDATE_OPT = "update";
-    private static final String DELETE_OPT = "delete";
-    private static final String SUBMIT_AND_SCHEDULE_OPT = "submitAndSchedule";
-    private static final String VALIDATE_OPT = "validate";
-    private static final String DEFINITION_OPT = "definition";
-    public static final String SLA_MISS_ALERT_OPT = "slaAlert";
-
-    private static final String LOOKUP_OPT = "lookup";
-    private static final String PATH_OPT = "path";
-    private static final String TOUCH_OPT = "touch";
-    private static final String PROPS_OPT = "properties";
-    private static final String FIELDS_OPT = "fields";
-    private static final String TAGS_OPT = "tags";
-    private static final String NUM_INSTANCES_OPT = "numInstances";
-    private static final String NAMESEQ_OPT = "nameseq";
-    private static final String TAGKEYS_OPT = "tagkeys";
-    private static final String SHOWSCHEDULER_OPT = "showScheduler";
-
-    public FalconEntityCLI() throws Exception {
-        super();
-    }
-
-    public Options createEntityOptions() {
-
-        Options entityOptions = new Options();
-
-        Option submit = new Option(SUBMIT_OPT, false,
-                "Submits an entity xml to Falcon");
-        Option update = new Option(UPDATE_OPT, false,
-                "Updates an existing entity xml");
-        Option schedule = new Option(SCHEDULE_OPT, false,
-                "Schedules a submited entity in Falcon");
-        Option suspend = new Option(SUSPEND_OPT, false,
-                "Suspends a running entity in Falcon");
-        Option resume = new Option(RESUME_OPT, false,
-                "Resumes a suspended entity in Falcon");
-        Option delete = new Option(DELETE_OPT, false,
-                "Deletes an entity in Falcon, and kills its instance from workflow engine");
-        Option submitAndSchedule = new Option(SUBMIT_AND_SCHEDULE_OPT, false,
-                "Submits and entity to Falcon and schedules it immediately");
-        Option validate = new Option(VALIDATE_OPT, false,
-                "Validates an entity based on the entity type");
-        Option status = new Option(STATUS_OPT, false,
-                "Gets the status of entity");
-        Option definition = new Option(DEFINITION_OPT, false,
-                "Gets the Definition of entity");
-        Option dependency = new Option(DEPENDENCY_OPT, false,
-                "Gets the dependencies of entity");
-        Option list = new Option(LIST_OPT, false,
-                "List entities registered for a type");
-        Option lookup = new Option(LOOKUP_OPT, false, "Lookup a feed given its instance's path");
-        Option slaAlert = new Option(SLA_MISS_ALERT_OPT, false, "Get missing feed instances which missed SLA");
-        Option entitySummary = new Option(SUMMARY_OPT, false,
-                "Get summary of instances for list of entities");
-        Option touch = new Option(TOUCH_OPT, false,
-                "Force updates the entity in the workflow engine (even without any changes to the entity)");
-
-        OptionGroup group = new OptionGroup();
-        group.addOption(submit);
-        group.addOption(update);
-        group.addOption(schedule);
-        group.addOption(suspend);
-        group.addOption(resume);
-        group.addOption(delete);
-        group.addOption(submitAndSchedule);
-        group.addOption(validate);
-        group.addOption(status);
-        group.addOption(definition);
-        group.addOption(dependency);
-        group.addOption(list);
-        group.addOption(lookup);
-        group.addOption(slaAlert);
-        group.addOption(entitySummary);
-        group.addOption(touch);
-
-        Option url = new Option(URL_OPTION, true, "Falcon URL");
-        Option entityType = new Option(TYPE_OPT, true,
-                "Entity type, can be cluster, feed or process xml");
-        Option filePath = new Option(FILE_PATH_OPT, true,
-                "Path to entity xml file");
-        Option entityName = new Option(ENTITY_NAME_OPT, true,
-                "Entity name");
-        Option start = new Option(START_OPT, true, "Start time is optional for summary");
-        Option end = new Option(END_OPT, true, "End time is optional for summary");
-        Option colo = new Option(COLO_OPT, true, "Colo name");
-        Option cluster = new Option(CLUSTER_OPT, true, "Cluster name");
-        colo.setRequired(false);
-        Option fields = new Option(FIELDS_OPT, true, "Entity fields to show for a request");
-        Option filterBy = new Option(FILTER_BY_OPT, true,
-                "Filter returned entities by the specified status");
-        Option filterTags = new Option(TAGS_OPT, true, "Filter returned entities by the specified tags");
-        Option nameSubsequence = new Option(NAMESEQ_OPT, true, "Subsequence of entity name");
-        Option tagKeywords = new Option(TAGKEYS_OPT, true, "Keywords in tags");
-        Option orderBy = new Option(ORDER_BY_OPT, true,
-                "Order returned entities by this field");
-        Option sortOrder = new Option(SORT_ORDER_OPT, true, "asc or desc order for results");
-        Option offset = new Option(OFFSET_OPT, true,
-                "Start returning entities from this offset");
-        Option numResults = new Option(NUM_RESULTS_OPT, true,
-                "Number of results to return per request");
-        Option numInstances = new Option(NUM_INSTANCES_OPT, true,
-                "Number of instances to return per entity summary request");
-        Option path = new Option(PATH_OPT, true, "Path for a feed's instance");
-        Option skipDryRun = new Option(SKIPDRYRUN_OPT, false, "skip dry run in workflow engine");
-        Option doAs = new Option(DO_AS_OPT, true, "doAs user");
-        Option userProps = new Option(PROPS_OPT, true, "User supplied comma separated key value properties");
-        Option showScheduler = new Option(SHOWSCHEDULER_OPT, false, "To return the scheduler "
-                + "on which the entity is scheduled.");
-        Option debug = new Option(DEBUG_OPTION, false, "Use debug mode to see debugging statements on stdout");
-
-        entityOptions.addOption(url);
-        entityOptions.addOption(path);
-        entityOptions.addOptionGroup(group);
-        entityOptions.addOption(entityType);
-        entityOptions.addOption(entityName);
-        entityOptions.addOption(filePath);
-        entityOptions.addOption(colo);
-        entityOptions.addOption(cluster);
-        entityOptions.addOption(start);
-        entityOptions.addOption(end);
-        entityOptions.addOption(fields);
-        entityOptions.addOption(filterBy);
-        entityOptions.addOption(filterTags);
-        entityOptions.addOption(nameSubsequence);
-        entityOptions.addOption(tagKeywords);
-        entityOptions.addOption(orderBy);
-        entityOptions.addOption(sortOrder);
-        entityOptions.addOption(offset);
-        entityOptions.addOption(numResults);
-        entityOptions.addOption(numInstances);
-        entityOptions.addOption(skipDryRun);
-        entityOptions.addOption(doAs);
-        entityOptions.addOption(userProps);
-        entityOptions.addOption(debug);
-        entityOptions.addOption(showScheduler);
-
-        return entityOptions;
-    }
-
-    public void entityCommand(CommandLine commandLine, FalconClient client) throws FalconCLIException, IOException {
-        Set<String> optionsList = new HashSet<String>();
-        for (Option option : commandLine.getOptions()) {
-            optionsList.add(option.getOpt());
-        }
-
-        String result = null;
-        String entityType = commandLine.getOptionValue(TYPE_OPT);
-        String entityName = commandLine.getOptionValue(ENTITY_NAME_OPT);
-        String filePath = commandLine.getOptionValue(FILE_PATH_OPT);
-        String colo = commandLine.getOptionValue(COLO_OPT);
-        colo = getColo(colo);
-        String cluster = commandLine.getOptionValue(CLUSTER_OPT);
-        String start = commandLine.getOptionValue(START_OPT);
-        String end = commandLine.getOptionValue(END_OPT);
-        String orderBy = commandLine.getOptionValue(ORDER_BY_OPT);
-        String sortOrder = commandLine.getOptionValue(SORT_ORDER_OPT);
-        String filterBy = commandLine.getOptionValue(FILTER_BY_OPT);
-        String filterTags = commandLine.getOptionValue(TAGS_OPT);
-        String nameSubsequence = commandLine.getOptionValue(NAMESEQ_OPT);
-        String tagKeywords = commandLine.getOptionValue(TAGKEYS_OPT);
-        String fields = commandLine.getOptionValue(FIELDS_OPT);
-        String feedInstancePath = commandLine.getOptionValue(PATH_OPT);
-        Integer offset = parseIntegerInput(commandLine.getOptionValue(OFFSET_OPT), 0, "offset");
-        Integer numResults = parseIntegerInput(commandLine.getOptionValue(NUM_RESULTS_OPT),
-                null, "numResults");
-        String doAsUser = commandLine.getOptionValue(DO_AS_OPT);
-
-        Integer numInstances = parseIntegerInput(commandLine.getOptionValue(NUM_INSTANCES_OPT), 7, "numInstances");
-        Boolean skipDryRun = null;
-        if (optionsList.contains(SKIPDRYRUN_OPT)) {
-            skipDryRun = true;
-        }
-
-        String userProps = commandLine.getOptionValue(PROPS_OPT);
-        boolean showScheduler = false;
-        if (optionsList.contains(SHOWSCHEDULER_OPT)) {
-            showScheduler = true;
-        }
-
-        EntityType entityTypeEnum = null;
-        if (optionsList.contains(LIST_OPT)) {
-            if (entityType == null) {
-                entityType = "";
-            }
-            if (StringUtils.isNotEmpty(entityType)) {
-                String[] types = entityType.split(",");
-                for (String type : types) {
-                    EntityType.getEnum(type);
-                }
-            }
-        } else {
-            validateNotEmpty(entityType, TYPE_OPT);
-            entityTypeEnum = EntityType.getEnum(entityType);
-        }
-        validateSortOrder(sortOrder);
-        String entityAction = "entity";
-
-        if (optionsList.contains(SLA_MISS_ALERT_OPT)) {
-            validateNotEmpty(entityType, TYPE_OPT);
-            validateNotEmpty(start, START_OPT);
-            parseDateString(start);
-            parseDateString(end);
-            SchedulableEntityInstanceResult response = client.getFeedSlaMissPendingAlerts(entityType,
-                    entityName, start, end, colo);
-            result = ResponseHelper.getString(response);
-        } else if (optionsList.contains(SUBMIT_OPT)) {
-            validateNotEmpty(filePath, "file");
-            validateColo(optionsList);
-            result = client.submit(entityType, filePath, doAsUser).getMessage();
-        } else if (optionsList.contains(LOOKUP_OPT)) {
-            validateNotEmpty(feedInstancePath, PATH_OPT);
-            FeedLookupResult resp = client.reverseLookUp(entityType, feedInstancePath, doAsUser);
-            result = ResponseHelper.getString(resp);
-        } else if (optionsList.contains(UPDATE_OPT)) {
-            validateNotEmpty(filePath, "file");
-            validateColo(optionsList);
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            result = client.update(entityType, entityName, filePath, skipDryRun, doAsUser).getMessage();
-        } else if (optionsList.contains(SUBMIT_AND_SCHEDULE_OPT)) {
-            validateNotEmpty(filePath, "file");
-            validateColo(optionsList);
-            result = client.submitAndSchedule(entityType, filePath, skipDryRun, doAsUser, userProps).getMessage();
-        } else if (optionsList.contains(VALIDATE_OPT)) {
-            validateNotEmpty(filePath, "file");
-            validateColo(optionsList);
-            result = client.validate(entityType, filePath, skipDryRun, doAsUser).getMessage();
-        } else if (optionsList.contains(SCHEDULE_OPT)) {
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            colo = getColo(colo);
-            result = client.schedule(entityTypeEnum, entityName, colo, skipDryRun, doAsUser, userProps).getMessage();
-        } else if (optionsList.contains(SUSPEND_OPT)) {
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            colo = getColo(colo);
-            result = client.suspend(entityTypeEnum, entityName, colo, doAsUser).getMessage();
-        } else if (optionsList.contains(RESUME_OPT)) {
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            colo = getColo(colo);
-            result = client.resume(entityTypeEnum, entityName, colo, doAsUser).getMessage();
-        } else if (optionsList.contains(DELETE_OPT)) {
-            validateColo(optionsList);
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            result = client.delete(entityTypeEnum, entityName, doAsUser).getMessage();
-        } else if (optionsList.contains(STATUS_OPT)) {
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            colo = getColo(colo);
-            result = client.getStatus(entityTypeEnum, entityName, colo, doAsUser, showScheduler).getMessage();
-        } else if (optionsList.contains(DEFINITION_OPT)) {
-            validateColo(optionsList);
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            result = client.getDefinition(entityType, entityName, doAsUser).toString();
-        } else if (optionsList.contains(DEPENDENCY_OPT)) {
-            validateColo(optionsList);
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            result = client.getDependency(entityType, entityName, doAsUser).toString();
-        } else if (optionsList.contains(LIST_OPT)) {
-            validateColo(optionsList);
-            validateEntityFields(fields);
-            validateOrderBy(orderBy, entityAction);
-            validateFilterBy(filterBy, entityAction);
-            EntityList entityList = client.getEntityList(entityType, fields, nameSubsequence, tagKeywords,
-                    filterBy, filterTags, orderBy, sortOrder, offset, numResults, doAsUser);
-            result = entityList != null ? entityList.toString() : "No entity of type (" + entityType + ") found.";
-        }  else if (optionsList.contains(SUMMARY_OPT)) {
-            validateEntityTypeForSummary(entityType);
-            validateNotEmpty(cluster, CLUSTER_OPT);
-            validateEntityFields(fields);
-            validateFilterBy(filterBy, entityAction);
-            validateOrderBy(orderBy, entityAction);
-            result = ResponseHelper.getString(client.getEntitySummary(
-                    entityType, cluster, start, end, fields, filterBy, filterTags,
-                    orderBy, sortOrder, offset, numResults, numInstances, doAsUser));
-        } else if (optionsList.contains(TOUCH_OPT)) {
-            validateNotEmpty(entityName, ENTITY_NAME_OPT);
-            colo = getColo(colo);
-            result = client.touch(entityType, entityName, colo, skipDryRun, doAsUser).getMessage();
-        } else if (optionsList.contains(HELP_CMD)) {
-            OUT.get().println("Falcon Help");
-        } else {
-            throw new FalconCLIException("Invalid command");
-        }
-        OUT.get().println(result);
-    }
-
-    private void validateColo(Set<String> optionsList) throws FalconCLIException {
-        if (optionsList.contains(COLO_OPT)) {
-            throw new FalconCLIException("Invalid argument : " + COLO_OPT);
-        }
-    }
-
-    private void validateEntityFields(String fields) throws FalconCLIException {
-        if (StringUtils.isEmpty(fields)) {
-            return;
-        }
-        String[] fieldsList = fields.split(",");
-        for (String s : fieldsList) {
-            try {
-                EntityList.EntityFieldList.valueOf(s.toUpperCase());
-            } catch (IllegalArgumentException ie) {
-                throw new FalconCLIException("Invalid fields argument : " + FIELDS_OPT);
-            }
-        }
-    }
-
-    private Date parseDateString(String time) throws FalconCLIException {
-        if (time != null && !time.isEmpty()) {
-            try {
-                return SchemaHelper.parseDateUTC(time);
-            } catch(Exception e) {
-                throw new FalconCLIException("Time " + time + " is not valid", e);
-            }
-        }
-        return null;
-    }
-
-}
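
For context on the class removed above: createEntityOptions() relies on commons-cli's OptionGroup so that only one entity action (submit, update, schedule, delete, and so on) can be selected per invocation, while shared arguments such as -type and -file are added directly to the Options set. Below is a minimal, hypothetical sketch of that pattern, not Falcon code; it assumes commons-cli 1.3+ (DefaultParser) and uses made-up option names.

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.OptionGroup;
    import org.apache.commons.cli.Options;

    public class EntityOptionsSketch {
        public static void main(String[] args) throws Exception {
            // Mutually exclusive actions go into one OptionGroup.
            OptionGroup actions = new OptionGroup();
            actions.addOption(new Option("submit", false, "Submits an entity xml"));
            actions.addOption(new Option("delete", false, "Deletes an entity"));

            // Shared arguments are added directly to the Options set.
            Options options = new Options();
            options.addOptionGroup(actions);
            options.addOption(new Option("type", true, "Entity type"));
            options.addOption(new Option("file", true, "Path to entity xml file"));

            CommandLine cmd = new DefaultParser().parse(options, args);
            if (cmd.hasOption("submit")) {
                System.out.println("submit " + cmd.getOptionValue("file")
                        + " as type " + cmd.getOptionValue("type"));
            } else if (cmd.hasOption("delete")) {
                System.out.println("delete entity of type " + cmd.getOptionValue("type"));
            }
        }
    }

Because the parser rejects a command line that selects two options from the same OptionGroup, a dispatcher in the style of the deleted entityCommand() can simply test for the single chosen action.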

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java b/client/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java
deleted file mode 100644
index 544bda8..0000000
--- a/client/src/main/java/org/apache/falcon/cli/FalconInstanceCLI.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.Options;
-import org.apache.falcon.LifeCycle;
-import org.apache.falcon.ResponseHelper;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-import org.apache.falcon.resource.InstanceDependencyResult;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Instance extension to Falcon Command Line Interface - wraps the RESTful API for instances.
- */
-public class FalconInstanceCLI extends FalconCLI {
-
-    private static final String FORCE_RERUN_FLAG = "force";
-    private static final String INSTANCE_TIME_OPT = "instanceTime";
-    private static final String RUNNING_OPT = "running";
-    private static final String KILL_OPT = "kill";
-    private static final String RERUN_OPT = "rerun";
-    private static final String LOG_OPT = "logs";
-    private static final String ALL_ATTEMPTS = "allAttempts";
-    private static final String RUNID_OPT = "runid";
-    private static final String CLUSTERS_OPT = "clusters";
-    private static final String SOURCECLUSTER_OPT = "sourceClusters";
-    private static final String LIFECYCLE_OPT = "lifecycle";
-    private static final String PARARMS_OPT = "params";
-    private static final String LISTING_OPT = "listing";
-    private static final String TRIAGE_OPT = "triage";
-
-    public FalconInstanceCLI() throws Exception {
-        super();
-    }
-
-    public Options createInstanceOptions() {
-
-        Options instanceOptions = new Options();
-
-        Option running = new Option(RUNNING_OPT, false,
-                "Gets running process instances for a given process");
-        Option list = new Option(LIST_OPT, false,
-                "Gets all instances for a given process in the range start time and optional end time");
-        Option status = new Option(STATUS_OPT, false,
-                "Gets status of process instances for a given process in the range start time and optional end time");
-        Option summary = new Option(SUMMARY_OPT, false,
-                "Gets summary of instances for a given process in the range start time and optional end time");
-        Option kill = new Option(KILL_OPT, false,
-                "Kills active process instances for a given process in the range start time and optional end time");
-        Option suspend = new Option(SUSPEND_OPT, false,
-                "Suspends active process instances for a given process in the range start time and optional end time");
-        Option resume = new Option(RESUME_OPT, false,
-                "Resumes suspended process instances for a given process "
-                        + "in the range start time and optional end time");
-        Option rerun = new Option(RERUN_OPT, false,
-                "Reruns process instances for a given process in the range start time and "
-                        + "optional end time and overrides properties present in job.properties file");
-        Option logs = new Option(LOG_OPT, false,
-                "Logs print the logs for process instances for a given process in "
-                        + "the range start time and optional end time");
-        Option params = new Option(PARARMS_OPT, false,
-                "Displays the workflow parameters for a given instance of specified nominal time; "
-                        + "start time represents nominal time and end time is not considered");
-        Option listing = new Option(LISTING_OPT, false,
-                "Displays feed listing and their status between a start and end time range.");
-        Option dependency = new Option(DEPENDENCY_OPT, false,
-                "Displays dependent instances for a specified instance.");
-        Option triage = new Option(TRIAGE_OPT, false,
-                "Triage a feed or process instance and find the failures in its lineage.");
-
-        OptionGroup group = new OptionGroup();
-        group.addOption(running);
-        group.addOption(list);
-        group.addOption(status);
-        group.addOption(summary);
-        group.addOption(kill);
-        group.addOption(suspend);
-        group.addOption(resume);
-        group.addOption(rerun);
-        group.addOption(logs);
-        group.addOption(params);
-        group.addOption(listing);
-        group.addOption(dependency);
-        group.addOption(triage);
-
-        Option url = new Option(URL_OPTION, true, "Falcon URL");
-        Option start = new Option(START_OPT, true,
-                "Start time is required for commands status, kill, suspend, resume and re-run, "
-                        + "and it is nominal time while displaying workflow params");
-        Option end = new Option(END_OPT, true,
-                "End time is optional for commands, status, kill, suspend, resume and re-run; "
-                        + "if not specified then current time is considered as end time");
-        Option runid = new Option(RUNID_OPT, true,
-                "Instance runid is optional and user can specify the runid, defaults to 0");
-        Option clusters = new Option(CLUSTERS_OPT, true,
-                "clusters is optional for commands kill, suspend and resume, "
-                        + "should not be specified for other commands");
-        Option sourceClusters = new Option(SOURCECLUSTER_OPT, true,
-                "Source cluster is optional for commands kill, suspend and resume, "
-                        + "should not be specified for other commands (required for only feed)");
-        Option filePath = new Option(FILE_PATH_OPT, true,
-                "Path to job.properties file is required for rerun command, "
-                        + "it should contain name=value pair for properties to override for rerun");
-        Option entityType = new Option(TYPE_OPT, true,
-                "Entity type, can be feed or process xml");
-        Option entityName = new Option(ENTITY_NAME_OPT, true,
-                "Entity name, can be feed or process name");
-        Option colo = new Option(COLO_OPT, true,
-                "Colo on which the cmd has to be executed");
-        Option lifecycle = new Option(LIFECYCLE_OPT, true,
-                "describes life cycle of entity, for feed it can be replication/retention "
-                        + "and for process it can be execution");
-        Option filterBy = new Option(FILTER_BY_OPT, true,
-                "Filter returned instances by the specified fields");
-        Option orderBy = new Option(ORDER_BY_OPT, true,
-                "Order returned instances by this field");
-        Option sortOrder = new Option(SORT_ORDER_OPT, true, "asc or desc order for results");
-        Option offset = new Option(OFFSET_OPT, true,
-                "Start returning instances from this offset");
-        Option numResults = new Option(NUM_RESULTS_OPT, true,
-                "Number of results to return per request");
-        Option forceRerun = new Option(FORCE_RERUN_FLAG, false,
-                "Flag to forcefully rerun entire workflow of an instance");
-        Option doAs = new Option(DO_AS_OPT, true, "doAs user");
-        Option debug = new Option(DEBUG_OPTION, false, "Use debug mode to see debugging statements on stdout");
-
-        Option instanceTime = new Option(INSTANCE_TIME_OPT, true, "Time for an instance");
-
-        Option allAttempts = new Option(ALL_ATTEMPTS, false, "To get all attempts of corresponding instances");
-
-        instanceOptions.addOption(url);
-        instanceOptions.addOptionGroup(group);
-        instanceOptions.addOption(start);
-        instanceOptions.addOption(end);
-        instanceOptions.addOption(filePath);
-        instanceOptions.addOption(entityType);
-        instanceOptions.addOption(entityName);
-        instanceOptions.addOption(runid);
-        instanceOptions.addOption(clusters);
-        instanceOptions.addOption(sourceClusters);
-        instanceOptions.addOption(colo);
-        instanceOptions.addOption(lifecycle);
-        instanceOptions.addOption(filterBy);
-        instanceOptions.addOption(offset);
-        instanceOptions.addOption(orderBy);
-        instanceOptions.addOption(sortOrder);
-        instanceOptions.addOption(numResults);
-        instanceOptions.addOption(forceRerun);
-        instanceOptions.addOption(doAs);
-        instanceOptions.addOption(debug);
-        instanceOptions.addOption(instanceTime);
-        instanceOptions.addOption(allAttempts);
-
-        return instanceOptions;
-    }
-
-    public void instanceCommand(CommandLine commandLine, FalconClient client) throws FalconCLIException, IOException {
-        Set<String> optionsList = new HashSet<String>();
-        for (Option option : commandLine.getOptions()) {
-            optionsList.add(option.getOpt());
-        }
-
-        String result;
-        String type = commandLine.getOptionValue(TYPE_OPT);
-        String entity = commandLine.getOptionValue(ENTITY_NAME_OPT);
-        String instanceTime = commandLine.getOptionValue(INSTANCE_TIME_OPT);
-        String start = commandLine.getOptionValue(START_OPT);
-        String end = commandLine.getOptionValue(END_OPT);
-        String filePath = commandLine.getOptionValue(FILE_PATH_OPT);
-        String runId = commandLine.getOptionValue(RUNID_OPT);
-        String colo = commandLine.getOptionValue(COLO_OPT);
-        String clusters = commandLine.getOptionValue(CLUSTERS_OPT);
-        String sourceClusters = commandLine.getOptionValue(SOURCECLUSTER_OPT);
-        List<LifeCycle> lifeCycles = getLifeCycle(commandLine.getOptionValue(LIFECYCLE_OPT));
-        String filterBy = commandLine.getOptionValue(FILTER_BY_OPT);
-        String orderBy = commandLine.getOptionValue(ORDER_BY_OPT);
-        String sortOrder = commandLine.getOptionValue(SORT_ORDER_OPT);
-        String doAsUser = commandLine.getOptionValue(DO_AS_OPT);
-        Integer offset = parseIntegerInput(commandLine.getOptionValue(OFFSET_OPT), 0, "offset");
-        Integer numResults = parseIntegerInput(commandLine.getOptionValue(NUM_RESULTS_OPT), null, "numResults");
-
-        colo = getColo(colo);
-        String instanceAction = "instance";
-        validateSortOrder(sortOrder);
-        validateInstanceCommands(optionsList, entity, type, colo);
-
-        if (optionsList.contains(TRIAGE_OPT)) {
-            validateNotEmpty(colo, COLO_OPT);
-            validateNotEmpty(start, START_OPT);
-            validateNotEmpty(type, TYPE_OPT);
-            validateEntityTypeForSummary(type);
-            validateNotEmpty(entity, ENTITY_NAME_OPT);
-            result = client.triage(type, entity, start, colo).toString();
-        } else if (optionsList.contains(DEPENDENCY_OPT)) {
-            validateNotEmpty(instanceTime, INSTANCE_TIME_OPT);
-            InstanceDependencyResult response = client.getInstanceDependencies(type, entity, instanceTime, colo);
-            result = ResponseHelper.getString(response);
-
-        } else if (optionsList.contains(RUNNING_OPT)) {
-            validateOrderBy(orderBy, instanceAction);
-            validateFilterBy(filterBy, instanceAction);
-            result = ResponseHelper.getString(client.getRunningInstances(type,
-                    entity, colo, lifeCycles, filterBy, orderBy, sortOrder, offset, numResults, doAsUser));
-        } else if (optionsList.contains(STATUS_OPT) || optionsList.contains(LIST_OPT)) {
-            boolean allAttempts = false;
-            if (optionsList.contains(ALL_ATTEMPTS)) {
-                allAttempts = true;
-            }
-            validateOrderBy(orderBy, instanceAction);
-            validateFilterBy(filterBy, instanceAction);
-            result = ResponseHelper.getString(client.getStatusOfInstances(type, entity, start, end, colo,
-                    lifeCycles, filterBy, orderBy, sortOrder, offset, numResults, doAsUser, allAttempts));
-        } else if (optionsList.contains(SUMMARY_OPT)) {
-            validateOrderBy(orderBy, "summary");
-            validateFilterBy(filterBy, "summary");
-            result = ResponseHelper.getString(client.getSummaryOfInstances(type, entity, start, end, colo,
-                    lifeCycles, filterBy, orderBy, sortOrder, doAsUser));
-        } else if (optionsList.contains(KILL_OPT)) {
-            validateNotEmpty(start, START_OPT);
-            validateNotEmpty(end, END_OPT);
-            result = ResponseHelper.getString(client.killInstances(type, entity, start, end, colo, clusters,
-                    sourceClusters, lifeCycles, doAsUser));
-        } else if (optionsList.contains(SUSPEND_OPT)) {
-            validateNotEmpty(start, START_OPT);
-            validateNotEmpty(end, END_OPT);
-            result = ResponseHelper.getString(client.suspendInstances(type, entity, start, end, colo, clusters,
-                    sourceClusters, lifeCycles, doAsUser));
-        } else if (optionsList.contains(RESUME_OPT)) {
-            validateNotEmpty(start, START_OPT);
-            validateNotEmpty(end, END_OPT);
-            result = ResponseHelper.getString(client.resumeInstances(type, entity, start, end, colo, clusters,
-                    sourceClusters, lifeCycles, doAsUser));
-        } else if (optionsList.contains(RERUN_OPT)) {
-            validateNotEmpty(start, START_OPT);
-            validateNotEmpty(end, END_OPT);
-            boolean isForced = false;
-            if (optionsList.contains(FORCE_RERUN_FLAG)) {
-                isForced = true;
-            }
-            result = ResponseHelper.getString(client.rerunInstances(type, entity, start, end, filePath, colo,
-                    clusters, sourceClusters, lifeCycles, isForced, doAsUser));
-        } else if (optionsList.contains(LOG_OPT)) {
-            validateOrderBy(orderBy, instanceAction);
-            validateFilterBy(filterBy, instanceAction);
-            result = ResponseHelper.getString(client.getLogsOfInstances(type, entity, start, end, colo, runId,
-                    lifeCycles, filterBy, orderBy, sortOrder, offset, numResults, doAsUser), runId);
-        } else if (optionsList.contains(PARARMS_OPT)) {
-            // start time is the nominal time of instance
-            result = ResponseHelper.getString(client.getParamsOfInstance(type, entity,
-                    start, colo, lifeCycles, doAsUser));
-        } else if (optionsList.contains(LISTING_OPT)) {
-            result = ResponseHelper.getString(client.getFeedInstanceListing(type, entity, start, end, colo, doAsUser));
-        } else {
-            throw new FalconCLIException("Invalid command");
-        }
-
-        OUT.get().println(result);
-    }
-
-    private void validateInstanceCommands(Set<String> optionsList,
-                                          String entity, String type,
-                                          String colo) throws FalconCLIException {
-
-        validateNotEmpty(entity, ENTITY_NAME_OPT);
-        validateNotEmpty(type, TYPE_OPT);
-        validateNotEmpty(colo, COLO_OPT);
-
-        if (optionsList.contains(CLUSTERS_OPT)) {
-            if (optionsList.contains(RUNNING_OPT)
-                    || optionsList.contains(LOG_OPT)
-                    || optionsList.contains(STATUS_OPT)
-                    || optionsList.contains(SUMMARY_OPT)) {
-                throw new FalconCLIException("Invalid argument: clusters");
-            }
-        }
-
-        if (optionsList.contains(SOURCECLUSTER_OPT)) {
-            if (optionsList.contains(RUNNING_OPT)
-                    || optionsList.contains(LOG_OPT)
-                    || optionsList.contains(STATUS_OPT)
-                    || optionsList.contains(SUMMARY_OPT) || !type.equals("feed")) {
-                throw new FalconCLIException("Invalid argument: sourceClusters");
-            }
-        }
-
-        if (optionsList.contains(FORCE_RERUN_FLAG)) {
-            if (!optionsList.contains(RERUN_OPT)) {
-                throw new FalconCLIException("Force option can be used only with instance rerun");
-            }
-        }
-    }
-
-    private List<LifeCycle> getLifeCycle(String lifeCycleValue) throws FalconCLIException {
-
-        if (lifeCycleValue != null) {
-            String[] lifeCycleValues = lifeCycleValue.split(",");
-            List<LifeCycle> lifeCycles = new ArrayList<LifeCycle>();
-            try {
-                for (String lifeCycle : lifeCycleValues) {
-                    lifeCycles.add(LifeCycle.valueOf(lifeCycle.toUpperCase().trim()));
-                }
-            } catch (IllegalArgumentException e) {
-                throw new FalconCLIException("Invalid life cycle values: " + lifeCycles, e);
-            }
-            return lifeCycles;
-        }
-        return null;
-    }
-
-}
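
The removed instanceCommand() dispatches on exactly one instance action and converts the comma-separated -lifecycle argument into LifeCycle enum values via getLifeCycle(). The following is a hypothetical sketch of that comma-separated enum parsing only; the enum here is a stand-in, since org.apache.falcon.LifeCycle is not reproduced in this sketch.

    import java.util.ArrayList;
    import java.util.List;

    public class LifeCycleParseSketch {
        // Hypothetical stand-in for org.apache.falcon.LifeCycle.
        enum LifeCycle { EXECUTION, REPLICATION, RETENTION }

        static List<LifeCycle> parse(String value) {
            if (value == null) {
                return null;  // absent option means "use the default lifecycle"
            }
            List<LifeCycle> result = new ArrayList<>();
            for (String part : value.split(",")) {
                try {
                    // Trim and upper-case each token before resolving the enum constant.
                    result.add(LifeCycle.valueOf(part.trim().toUpperCase()));
                } catch (IllegalArgumentException e) {
                    throw new IllegalArgumentException("Invalid life cycle value: " + part, e);
                }
            }
            return result;
        }

        public static void main(String[] args) {
            System.out.println(parse("replication, retention"));  // [REPLICATION, RETENTION]
        }
    }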

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java b/client/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java
deleted file mode 100644
index 6487d41..0000000
--- a/client/src/main/java/org/apache/falcon/cli/FalconMetadataCLI.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.Options;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.metadata.RelationshipType;
-
-import java.io.PrintStream;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Metadata extension to Falcon Command Line Interface - wraps the RESTful API for Metadata.
- */
-public class FalconMetadataCLI extends FalconCLI {
-
-    public static final AtomicReference<PrintStream> OUT = new AtomicReference<PrintStream>(System.out);
-
-    // Discovery Commands
-    public static final String DISCOVERY_OPT = "discovery";
-    public static final String LIST_OPT = "list";
-    public static final String RELATIONS_OPT = "relations";
-    public static final String URL_OPTION = "url";
-    public static final String NAME_OPT = "name";
-
-    // Lineage Commands
-    public static final String LINEAGE_OPT = "lineage";
-    public static final String VERTEX_CMD = "vertex";
-    public static final String VERTICES_CMD = "vertices";
-    public static final String VERTEX_EDGES_CMD = "edges";
-    public static final String PIPELINE_OPT = "pipeline";
-    public static final String EDGE_CMD = "edge";
-    public static final String ID_OPT = "id";
-    public static final String KEY_OPT = "key";
-    public static final String VALUE_OPT = "value";
-    public static final String DIRECTION_OPT = "direction";
-
-    public FalconMetadataCLI() throws Exception {
-        super();
-    }
-
-    public Options createMetadataOptions() {
-        Options metadataOptions = new Options();
-
-        OptionGroup group = new OptionGroup();
-        Option discovery = new Option(DISCOVERY_OPT, false, "Discover falcon metadata relations");
-        Option lineage = new Option(LINEAGE_OPT, false, "Get falcon metadata lineage information");
-        group.addOption(discovery);
-        group.addOption(lineage);
-        Option pipeline = new Option(PIPELINE_OPT, true,
-                "Get lineage graph for the entities in a pipeline");
-        metadataOptions.addOptionGroup(group);
-
-        // Add discovery options
-
-        Option list = new Option(LIST_OPT, false, "List all dimensions");
-        Option relations = new Option(RELATIONS_OPT, false, "List all relations for a dimension");
-        metadataOptions.addOption(list);
-        metadataOptions.addOption(relations);
-
-        Option url = new Option(URL_OPTION, true, "Falcon URL");
-        Option type = new Option(TYPE_OPT, true, "Dimension type");
-        Option name = new Option(NAME_OPT, true, "Dimension name");
-        Option cluster = new Option(CLUSTER_OPT, true, "Cluster name");
-        Option feed = new Option(FEED_OPT, true, "Feed Entity name");
-        Option process = new Option(PROCESS_OPT, true, "Process Entity name");
-        Option numResults = new Option(NUM_RESULTS_OPT, true,
-                "Number of results to return per request");
-
-        // Add lineage options
-        metadataOptions.addOption(pipeline);
-
-        metadataOptions.addOption(url);
-        metadataOptions.addOption(type);
-        metadataOptions.addOption(cluster);
-        metadataOptions.addOption(name);
-        metadataOptions.addOption(feed);
-        metadataOptions.addOption(process);
-        metadataOptions.addOption(numResults);
-
-        Option vertex = new Option(VERTEX_CMD, false, "show the vertex");
-        Option vertices = new Option(VERTICES_CMD, false, "show the vertices");
-        Option vertexEdges = new Option(VERTEX_EDGES_CMD, false, "show the edges for a given vertex");
-        Option edges = new Option(EDGE_CMD, false, "show the edges");
-        Option id = new Option(ID_OPT, true, "vertex or edge id");
-        Option key = new Option(KEY_OPT, true, "key property");
-        Option value = new Option(VALUE_OPT, true, "value property");
-        Option direction = new Option(DIRECTION_OPT, true, "edge direction property");
-        Option debug = new Option(DEBUG_OPTION, false, "Use debug mode to see debugging statements on stdout");
-
-        metadataOptions.addOption(vertex);
-        metadataOptions.addOption(vertices);
-        metadataOptions.addOption(vertexEdges);
-        metadataOptions.addOption(edges);
-        metadataOptions.addOption(id);
-        metadataOptions.addOption(key);
-        metadataOptions.addOption(value);
-        metadataOptions.addOption(direction);
-        metadataOptions.addOption(debug);
-
-        Option doAs = new Option(FalconCLI.DO_AS_OPT, true, "doAs user");
-        metadataOptions.addOption(doAs);
-
-        return metadataOptions;
-    }
-
-    public void metadataCommand(CommandLine commandLine, FalconClient client) throws FalconCLIException {
-        Set<String> optionsList = new HashSet<String>();
-        for (Option option : commandLine.getOptions()) {
-            optionsList.add(option.getOpt());
-        }
-
-        String result;
-        String dimensionType = commandLine.getOptionValue(TYPE_OPT);
-        String cluster = commandLine.getOptionValue(CLUSTER_OPT);
-        String feed = commandLine.getOptionValue(FEED_OPT);
-        String process = commandLine.getOptionValue(PROCESS_OPT);
-        String dimensionName = commandLine.getOptionValue(NAME_OPT);
-        String id = commandLine.getOptionValue(ID_OPT);
-        String key = commandLine.getOptionValue(KEY_OPT);
-        String value = commandLine.getOptionValue(VALUE_OPT);
-        String direction = commandLine.getOptionValue(DIRECTION_OPT);
-        String pipeline = commandLine.getOptionValue(PIPELINE_OPT);
-        String doAsUser = commandLine.getOptionValue(FalconCLI.DO_AS_OPT);
-        Integer numResults = parseIntegerInput(commandLine.getOptionValue(NUM_RESULTS_OPT), null, "numResults");
-
-        if (optionsList.contains(LINEAGE_OPT)) {
-            validatePipelineName(pipeline);
-            result = client.getEntityLineageGraph(pipeline, doAsUser).getDotNotation();
-        } else if (optionsList.contains(LIST_OPT)) {
-            validateDimensionType(dimensionType.toUpperCase());
-            if (!(dimensionType.toUpperCase())
-                    .equals(RelationshipType.REPLICATION_METRICS.name())) {
-                result = client.getDimensionList(dimensionType, cluster, doAsUser);
-            } else {
-                String schedEntityType = null;
-                String schedEntityName = null;
-                if (StringUtils.isNotEmpty(feed)) {
-                    schedEntityType = EntityType.getEnum(FEED_OPT).name();
-                    schedEntityName = feed;
-                } else if (StringUtils.isNotEmpty(process)) {
-                    schedEntityType = EntityType.getEnum(PROCESS_OPT).name();
-                    schedEntityName = process;
-                }
-                validateScheduleEntity(schedEntityType, schedEntityName);
-
-                result = client.getReplicationMetricsDimensionList(schedEntityType, schedEntityName,
-                        numResults, doAsUser);
-            }
-        } else if (optionsList.contains(RELATIONS_OPT)) {
-            validateDimensionType(dimensionType.toUpperCase());
-            validateDimensionName(dimensionName, RELATIONS_OPT);
-            result = client.getDimensionRelations(dimensionType, dimensionName, doAsUser);
-        } else if (optionsList.contains(VERTEX_CMD)) {
-            validateId(id);
-            result = client.getVertex(id, doAsUser);
-        } else if (optionsList.contains(VERTICES_CMD)) {
-            validateVerticesCommand(key, value);
-            result = client.getVertices(key, value, doAsUser);
-        } else if (optionsList.contains(VERTEX_EDGES_CMD)) {
-            validateVertexEdgesCommand(id, direction);
-            result = client.getVertexEdges(id, direction, doAsUser);
-        } else if (optionsList.contains(EDGE_CMD)) {
-            validateId(id);
-            result = client.getEdge(id, doAsUser);
-        } else {
-            throw new FalconCLIException("Invalid metadata command");
-        }
-
-        OUT.get().println(result);
-    }
-
-    private void validatePipelineName(String pipeline) throws FalconCLIException {
-        if (StringUtils.isEmpty(pipeline)) {
-            throw new FalconCLIException("Invalid value for pipeline");
-        }
-    }
-
-    private void validateDimensionType(String dimensionType) throws FalconCLIException {
-        if (StringUtils.isEmpty(dimensionType)
-                ||  dimensionType.contains("INSTANCE")) {
-            throw new FalconCLIException("Invalid value provided for queryParam \"type\" " + dimensionType);
-        }
-        try {
-            RelationshipType.valueOf(dimensionType);
-        } catch (IllegalArgumentException iae) {
-            throw new FalconCLIException("Invalid value provided for queryParam \"type\" " + dimensionType);
-        }
-    }
-
-    private void validateDimensionName(String dimensionName, String action) throws FalconCLIException {
-        if (StringUtils.isEmpty(dimensionName)) {
-            throw new FalconCLIException("Dimension ID cannot be empty or null for action " + action);
-        }
-    }
-
-    private void validateScheduleEntity(String schedEntityType, String schedEntityName) throws FalconCLIException {
-        if (StringUtils.isBlank(schedEntityType)) {
-            throw new FalconCLIException("Entity must be a schedulable type: -feed/process");
-        }
-
-        if (StringUtils.isBlank(schedEntityName)) {
-            throw new FalconCLIException("Entity name is missing");
-        }
-    }
-
-    private void validateId(String id) throws FalconCLIException {
-        if (id == null || id.length() == 0) {
-            throw new FalconCLIException("Missing argument: id");
-        }
-    }
-
-    private void validateVerticesCommand(String key, String value) throws FalconCLIException {
-        if (key == null || key.length() == 0) {
-            throw new FalconCLIException("Missing argument: key");
-        }
-
-        if (value == null || value.length() == 0) {
-            throw new FalconCLIException("Missing argument: value");
-        }
-    }
-
-    private void validateVertexEdgesCommand(String id, String direction) throws FalconCLIException {
-        if (id == null || id.length() == 0) {
-            throw new FalconCLIException("Missing argument: id");
-        }
-
-        if (direction == null || direction.length() == 0) {
-            throw new FalconCLIException("Missing argument: direction");
-        }
-    }
-}
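
The removed validateDimensionType() accepts only RelationshipType names and explicitly rejects instance-level dimensions (any value containing "INSTANCE") before resolving the enum constant. Here is a hypothetical sketch of that validate-against-enum check; the enum below is a stand-in with only a few illustrative constants, not Falcon's actual RelationshipType.

    public class DimensionTypeValidationSketch {
        // Hypothetical stand-in for org.apache.falcon.metadata.RelationshipType.
        enum RelationshipType { CLUSTER_ENTITY, FEED_ENTITY, PROCESS_ENTITY, FEED_INSTANCE, REPLICATION_METRICS }

        static void validateDimensionType(String type) {
            // Reject empty values and instance-level dimensions outright.
            if (type == null || type.isEmpty() || type.contains("INSTANCE")) {
                throw new IllegalArgumentException("Invalid value provided for queryParam \"type\" " + type);
            }
            try {
                RelationshipType.valueOf(type);  // must be a known dimension type
            } catch (IllegalArgumentException iae) {
                throw new IllegalArgumentException("Invalid value provided for queryParam \"type\" " + type);
            }
        }

        public static void main(String[] args) {
            validateDimensionType("CLUSTER_ENTITY");      // accepted
            try {
                validateDimensionType("FEED_INSTANCE");   // rejected: instance-level dimension
            } catch (IllegalArgumentException expected) {
                System.out.println(expected.getMessage());
            }
        }
    }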

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/client/src/main/java/org/apache/falcon/cli/FalconRecipeCLI.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/cli/FalconRecipeCLI.java b/client/src/main/java/org/apache/falcon/cli/FalconRecipeCLI.java
deleted file mode 100644
index 82053f9..0000000
--- a/client/src/main/java/org/apache/falcon/cli/FalconRecipeCLI.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.cli;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.client.FalconCLIException;
-import org.apache.falcon.client.FalconClient;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Recipe extension to Falcon Command Line Interface - wraps the RESTful API for Recipe.
- */
-public class FalconRecipeCLI extends FalconCLI {
-
-    public FalconRecipeCLI() throws Exception {
-        super();
-    }
-
-    private static final String RECIPE_NAME = "name";
-    private static final String RECIPE_OPERATION = "operation";
-    private static final String RECIPE_TOOL_CLASS_NAME = "tool";
-    private static final String RECIPE_PROPERTIES_FILE = "properties";
-
-    public Options createRecipeOptions() {
-        Options recipeOptions = new Options();
-        Option url = new Option(URL_OPTION, true, "Falcon URL");
-        recipeOptions.addOption(url);
-
-        Option recipeFileOpt = new Option(RECIPE_NAME, true, "recipe name");
-        recipeOptions.addOption(recipeFileOpt);
-
-        Option recipeToolClassName = new Option(RECIPE_TOOL_CLASS_NAME, true, "recipe class");
-        recipeOptions.addOption(recipeToolClassName);
-
-        Option recipeOperation = new Option(RECIPE_OPERATION, true, "recipe operation");
-        recipeOptions.addOption(recipeOperation);
-
-        Option recipeProperties = new Option(RECIPE_PROPERTIES_FILE, true, "recipe properties file path");
-        recipeOptions.addOption(recipeProperties);
-
-        Option skipDryRunOperation = new Option(SKIPDRYRUN_OPT, false, "skip dryrun operation");
-        recipeOptions.addOption(skipDryRunOperation);
-
-        Option doAs = new Option(DO_AS_OPT, true, "doAs user");
-        recipeOptions.addOption(doAs);
-
-        return recipeOptions;
-    }
-
-    public void recipeCommand(CommandLine commandLine, FalconClient client) throws FalconCLIException {
-        Set<String> optionsList = new HashSet<String>();
-        for (Option option : commandLine.getOptions()) {
-            optionsList.add(option.getOpt());
-        }
-
-        String recipeName = commandLine.getOptionValue(RECIPE_NAME);
-        String recipeToolClass = commandLine.getOptionValue(RECIPE_TOOL_CLASS_NAME);
-        String recipeOperation = commandLine.getOptionValue(RECIPE_OPERATION);
-        String recipePropertiesFile = commandLine.getOptionValue(RECIPE_PROPERTIES_FILE);
-        String doAsUser = commandLine.getOptionValue(DO_AS_OPT);
-
-        validateNotEmpty(recipeName, RECIPE_NAME);
-        validateNotEmpty(recipeOperation, RECIPE_OPERATION);
-        validateRecipeOperations(recipeOperation);
-        validateRecipePropertiesFile(recipePropertiesFile, recipeName);
-        Boolean skipDryRun = null;
-        if (optionsList.contains(SKIPDRYRUN_OPT)) {
-            skipDryRun = true;
-        }
-
-        String result = client.submitRecipe(recipeName, recipeToolClass,
-                recipeOperation, recipePropertiesFile, skipDryRun, doAsUser).toString();
-        OUT.get().println(result);
-    }
-
-    private static void validateRecipeOperations(String recipeOperation) throws FalconCLIException {
-        for(RecipeOperation operation : RecipeOperation.values()) {
-            if (operation.toString().equalsIgnoreCase(recipeOperation)) {
-                return;
-            }
-        }
-        throw new FalconCLIException("Allowed Recipe operations: "
-                + java.util.Arrays.asList((RecipeOperation.values())));
-    }
-
-    private static void validateRecipePropertiesFile(String recipePropertiesFile, String recipeName)
-        throws FalconCLIException {
-        if (StringUtils.isBlank(recipePropertiesFile)) {
-            return;
-        }
-
-        String []fileSplits = recipePropertiesFile.split("/");
-        String recipePropertiesName = (fileSplits[fileSplits.length-1]).split("\\.")[0];
-        if (recipePropertiesName.equals(recipeName)) {
-            return;
-        }
-
-        throw new FalconCLIException("Provided properties file name does not match the recipe name: " + recipeName);
-    }
-}
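
The removed validateRecipePropertiesFile() only checks that the base name of the supplied properties file (last path segment, extension stripped) equals the recipe name. A hypothetical, self-contained sketch of that comparison, outside any Falcon classes; the paths and recipe names used in main() are made up for illustration.

    public class RecipePropertiesCheckSketch {
        // Mirrors the base-name comparison: /some/dir/hdfs-replication.properties
        // matches a recipe named "hdfs-replication".
        static boolean matchesRecipeName(String propertiesFile, String recipeName) {
            if (propertiesFile == null || propertiesFile.trim().isEmpty()) {
                return true;  // the properties file is optional
            }
            String[] pathParts = propertiesFile.split("/");
            String baseName = pathParts[pathParts.length - 1].split("\\.")[0];
            return baseName.equals(recipeName);
        }

        public static void main(String[] args) {
            System.out.println(matchesRecipeName("/recipes/hdfs-replication.properties", "hdfs-replication")); // true
            System.out.println(matchesRecipeName("/recipes/other.properties", "hdfs-replication"));            // false
        }
    }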


[50/51] [partial] falcon git commit: FALCON-1830 Removed code source directories and updated pom

Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
deleted file mode 100755
index 523e1b8..0000000
--- a/CHANGES.txt
+++ /dev/null
@@ -1,1866 +0,0 @@
-Apache Falcon Change log
-Trunk
-  TASKS:
-  INCOMPATIBLE CHANGES
-  NEW FEATURES
-    FALCON-1627 Provider integration with Azure Data Factory pipelines (Ying Zheng, Venkat Ranganathan, Sowmya Ramesh)
-   
-    FALCON-1664 Add Postgres support for native scheduler(Deepak Barr via Pragya Mittal)
-
-    FALCON-1495 In instance status list, show all runs for instances when requested by user(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1230 Data based notification Service to notify execution instances when data becomes available(Pavan Kumar Kolamuri via Ajay Yadava)
-
-  IMPROVEMENTS
-    FALCON-1584 Falcon allows invalid hadoop queue name for schedulable feed entities (Venkatesan Ramachandran via Balu Vellanki)
-
-    FALCON-1774 Falcon to honour PRISM_URL env var (Praveen Adlakha) 
-
-    FALCON-1721 Checkstyle doesn't extend parent.
-    
-    FALCON-1818 Minor doc update for tar package locations after FALCON-1751 (Deepak Barr)
-
-    FALCON-1729 Database ingest to support password alias via keystore file (Venkatesan Ramachandran via Balu Vellanki)
-
-    FALCON-1751 Support assembly:single mojo(Ruoyu Wang via Ajay Yadava)
-
-    FALCON-763 Support feed listing for CatalogStorage (Balu Vellanki)
-
-    FALCON-1764 Remove temporary folder "localhost" created during tests(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1756 Remove PID files on service stop(Deepak Barr via Ajay Yadava)
-
-    FALCON-1771 Tool to merge pull requests (Ajay Yadava)
-
-    FALCON-1770 Update README file (Ajay Yadava)
-
-  BUG FIXES
-    FALCON-887 Support for multiple lib paths in falcon process (Sowmya Ramesh)
-
-    FALCON-1795 Kill api not killing waiting/ready instances
-   
-    FALCON-1804 Non-SLA feed throws NullPointerException.
-    
-    FALCON-1806 Update documentation for Import and Export. (Venkatesan Ramachandran via Balu Vellanki)
-
-    FALCON-1787 Oozie pig-action.xml requires hive sharelib for HCatalog use(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-1792 Upgrade hadoop.version to 2.6.2 (Venkatesan Ramachandran via Peeyush Bishnoi)
-
-    FALCON-1796 [HOTFIX] Incorrect parent pom in distro module(Ajay Yadava)
-
-    FALCON-1779 Fix rat-check failure in trunk (Ajay Yadava)
-
-
-Proposed Release Version: 0.9
-  TASKS:
-    FALCON-1778 Check licenses and copyright information (Pallavi Rao)
-
-    FALCON-1718 Change versions in pom.xml of master and 0.9 branch(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1594 Update changes.txt to reflect 0.8 as released version(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-1523 Update CHANGES.txt to change 0.8 branch to release(Sowmya Ramesh)
-
-    FALCON-1453 Upgrade falcon POM for 0.8 release(Sowmya Ramesh)
-    
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-    FALCON-1742 Implement instance summary api for native scheduler (Pallavi Rao)
-
-    FALCON-1677 Support re-tries for timed-out instances (Narayan Periwal via Pallavi Rao)
-
-    FALCON-1643 Add CLI option to display captured replication metrics(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1679 API to get type of scheduler(native/oozie) (Pallavi Rao)
-
-    FALCON-1645 Ability to export to database(Venkat Ramachandran via Balu Vellanki)
-
-    FALCON-1639 Implement update feature for native scheduler (Pallavi Rao)
-
-    FALCON-1636 Add Rerun API In Falcon Native Scheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1652 Documentation for enabling native scheduler in falcon (Pallavi Rao)
-
-    FALCON-1512 Implement touch feature for native scheduler (Pallavi Rao)
-
-    FALCON-1233 Support co-existence of Oozie scheduler (coord) and Falcon native scheduler (Pallavi Rao)
-
-    FALCON-1596 Spring shell based CLI for Falcon
-	    FALCON-1608 Base framework for Spring Shell based shell for Falcon (Rajat Khandelwal via Ajay Yadava)
- 
-    FALCON-1234 State Store for instances scheduled by Falcon (Pavan Kolamuri via Pallavi Rao)
-    
-    FALCON-1480 Gather data transfer details of Hive DR(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1588 Add ability to provide the path for recipe files in command line(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1573 Supply user-defined properties to Oozie workflows during schedule(Daniel Del Castillo via Ajay Yadava)
-
-    FALCON-1559 Config changes required for native scheduler (Pallavi Rao)
-
-    FALCON-1459 Ability to import from database(Venkat Ramachandran via Sowmya Ramesh)
-
-    FALCON-1213 Base framework of the native scheduler(Pallavi Rao)
-
-  IMPROVEMENTS
-    FALCON-1707 Code Refactoring for Falcon Client(Ajay Yadava)
-
-    FALCON-1733 Support for building falcon with JDK 1.8 also(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1662 Ensure entity can be scheduled on multiple clusters on same colo (Pallavi Rao)
-
-    FALCON-1545 Add documentation for Hive replication job counters(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1601 Make Falcon StateStore more secure by not disclosing imp params in startup.props(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1705 Standardization of error handling in falcon Server(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1640 Cascading Delete for instances in Native Scheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1683 Inconsistent behavior when user tries to switch schedulers(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1669 Falcon should show more helpful message when it is unable to initialize Falcon Client object(Baishuo via Ajay Yadava)
-
-    FALCON-1635 Migration of EntityManagerJerseySmokeIT to use falcon unit(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1447 Integration Tests for native scheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1617 Enable SLA monitoring for instances in past(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1577 Migration of EntityManagerJerseyIT to use falcon unit (Narayan Periwal via Pallavi Rao)
-
-    FALCON-1658 MySql Support for Native Scheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1656 Improve FeedHelper:getRetentionFrequency method(Ajay Yadava)
-
-    FALCON-1616 Consume Workflow job end notifications for SLA monitoring(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1634 Add .reviewboardrc file so that review requests can be created using just command line(Rajat Khandelwal via Ajay Yadava)
-
-    FALCON-1557 Supporting some Entity Management Api's and admin api in Falcon Unit (Narayan Periwal via Pallavi Rao)
-
-    FALCON-1622 On starting falcon server JPS shows Main and not Falcon(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1607 Native Scheduler - Code refactoring: Refactor ID into more specific sub classes(Ajay Yadava)
-
-    FALCON-1587 Divide FalconCLI.twiki into sub sections for different modules on the lines of REST Api(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1552 Migration of  ProcessInstanceManagerIT to use falcon unit (Narayan Periwal via Pallavi Rao)
-
-    FALCON-1486 Add Unit Test cases for HiveDR(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1592 Code Refactoring: Introduce Event type for scheduler events (Ajay Yadava via Pallavi Rao)
-
-    FALCON-1593 Oozie setup failing in setup phase (Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1582 Documentation for globally disabling retries (Pallavi Rao)
-
-    FALCON-1517 Instance Management Api in Falcon Unit (Narayan Periwal via Pallavi Rao)
-
-    FALCON-1520 Delete, update, Validate entity operations support in Falcon Unit (Pavan Kolamuri via Pallavi Rao)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-    FALCON-1793 feed element action="archive" is submittable via command line tool falcon (Deepak Barr)
-
-    FALCON-1782 Client returns FalconWebException instead of the expected error message(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1747 Falcon instance status listing is throwing error message (Peeyush Bishnoi)    
-
-    FALCON-1758 APIs fail when oozie workflow entries are deleted (Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1754 JobCompletionService throws FalconException (Pallavi Rao)
-
-    FALCON-1716 API fails with CommunicationsException when mysql interaction time is longer than 53,434,939 milliseconds (Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1757 EntityNotRegisteredException when entity is deleted from falcon (Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1748 Client throws FalconWebException irrespective of type of error(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1727 Suspend fails with InvalidStateTransitionException if entity has 'KILLED' instances (Pallavi Rao)
-
-    FALCON-1723 Rerun with skip fail actions won't work in few cases (Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1538 Prism status gives wrong info(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1715 IllegalStateException in MetadataMappingService when entity is scheduled via native scheduler (Pallavi Rao)
-
-    FALCON-1732 Folders not getting deleted after testing (Praveen Adlakha via Pallavi Rao)
-
-    FALCON-1675 Lifecycle stage is not honoured when only cluster level lifecycle is implemented (Ajay Yadava via Pallavi Rao)
-
-    FALCON-1730 Upgrade enunciate for JDK 8 compatibility(Deepak Barr via Ajay Yadava)
-
-    FALCON-1741 Rerun API behaviour different in case of succeeded instances(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1735 FalconException shouldn't be caught in AbstractRerunHandler in case of obtaining Workflow engine from entity(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1678 SLA Monitoring does not honour entity end date(Ajay Yadava)
-
-    FALCON-1708  params API does not take start as a mandatory option(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1725 Falcon API shows results in ascending order in native scheduler (Pallavi Rao)
-
-    FALCON-1720 Rerun API does not rerun succeeded instances (Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1719 Retry does not update the state of the instance in the database (Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1710 dependency API sets totalResults as 0 by default(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1714 EntityNotRegisteredException when process with no input/output feed is scheduled(Ajay Yadava)
-
-    FALCON-1674 Fix the mapping of InstanceState status to workflow Status in InstancesResult (Pallavi Rao via Ajay Yadava)
-
-    FALCON-1709 FIFO order is not followed when scheduled using native scheduler (Pallavi Rao)
-
-    FALCON-1711 DependencyInstance twiki does not contain correct documentation (Praveen Adlakha via Pallavi Rao)
-
-    FALCON-1702 Exception thrown by workflow status listeners on success notification(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1703 Falcon docs module not generating html files(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1565 Listing API non-intuitive response if time > endTime (Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1696 Falcon build failing in checkstyle module (Praveen Adlakha via Pallavi Rao)
-
-    FALCON-1694 Status API shows params along with instance status(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1681 Improve logging for idempotent behaviour while scheduling entities(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1680 Error message is not intuitive when entity schedule fails(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1644 Retention : Some feed instances are never deleted by retention jobs(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1641 Triage on an invalid feed instance throws IndexOutOfBoundException(Karishma Gulati via Ajay Yadava)
-
-    FALCON-1572 Only one instance is running in a process when run using Native Scheduler(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1660 Examples directory missing in distributed mode(Praveen Adlakha via Ajay Yadava)
-
-    FALCON-1647 Unable to create feed : FilePermission error under cluster staging directory(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1651 Falcon doesn't start (Ajay Yadava)
-
-    FALCON-1598 Flaky test : EntityManagerJerseyIT.testDuplicateDeleteCommands (Narayan Periwal via Pallavi Rao)
-
-    FALCON-1568 Process Instances are not getting scheduled in Falcon Native Scheduler (Pallavi Rao)
-
-    FALCON-1595 In secure cluster, Falcon server loses ability to communicate with HDFS over time (Balu Vellanki)
-
-    FALCON-1490 Fixing inconsistencies in filterBy behavior (Narayan Periwal via Balu Vellanki)
-
-    FALCON-1604 Status API Not working in case of Falcon Native Scheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1589 Package sample recipe properties file(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1597 Falcon should not retry in case of an instance being manually killed by the user (Sandeep Samudrala via Pallavi Rao)
-
-    FALCON-1606 Process schedule fails in some cases in case of NativeScheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1605 Instance API is not working as expected in case of Native Scheduler(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1603 FeedHelperTest::testGetDateFromPath fails in some environments(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1372 Retention does not work in corner cases(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1569 Bug in setting the frequency of Feed retention coordinator(Sowmya Ramesh)
-
-    FALCON-1578 post-processing action fails with javax.servlet.jsp.el.ELException(Venkat Ramachandran via Ajay Yadava)
-
-    FALCON-1579 post-processing action fails with javax.servlet.jsp.el.ELException (Sowmya Ramesh via Pallavi Rao)
-
-    FALCON-1553 Flaky test in SchedulerUtilTest (Balu Vellanki via Pallavi Rao)   
- 
-    FALCON-1554 Fix Changes.txt after 0.8 branch cut(Sowmya Ramesh)
-
-Release Version: 0.8
-    FALCON-1349 Upgrade falcon POM for 0.7 release(Ajay Yadava)
-
-  INCOMPATIBLE CHANGES
-    FALCON-1401 MetadataMappingService fails to add an edge for a process instance(Pallavi Rao) 
-
-  NEW FEATURES
-    FALCON-570 Falcon needs to publish maven artifacts (Shwetha GS)
-
-    FALCON-1485 Add documentation for data transfer details of filesystem replication(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1315 Update falcon ui for HiveDR, secure clusters and bug fixes(Armando Reyna/Venkat Ranganathan via Sowmya Ramesh)
-
-    FALCON-1102 Gather data transfer details of filesystem replication(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1316 Add supporting REST API calls for new UI(Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1473 Feed SLA Miss Alerts through REST API(Ajay Yadava)
-
-    FALCON-965 Open up life cycle stage implementation within Falcon for extension(Ajay Yadava)
-
-    FALCON-1437 Change DR recipes notification with Falcon notification(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1027 Falcon proxy user support(Sowmya Ramesh)
-
-  IMPROVEMENTS
-    FALCON-1585 Documentation for HDFS and Hive DR(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1574 Document behavior of lifecycle retention stage frequency behavior(Sowmya Ramesh via Ajay Yadava).
-
-    FALCON-1564 Provide an option for users to disable system post-processing JMS notification (Pallavi Rao) 
-
-    FALCON-1519 Suspend And Resume API's in Falcon Unit(Narayan Periwal via Pallavi Rao)
-
-    FALCON-1524 Improve Lifecycle Retention validation checks(Ajay Yadava)
-
-    FALCON-1516 Feed Retention support in Falcon Unit(Pavan Kolamuri via Pallavi Rao) 
-
-    FALCON-1527 Release Falcon Unit test jar(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1231 Improve JobCompletionNotification Service(Pallavi Rao)
-
-    FALCON-1157 Build error when using maven 3.3.x(Venkat Ramachandran via Pallavi Rao)
-
-    FALCON-1477 Adding "-debug" option to Falcon CLI for debug statements to stdout(Narayan Periwal via Pallavi Rao)
-
-    FALCON-1476 Maintaining threshold on monitoring entities for SLA service(Ajay Yadava)
-
-    FALCON-592 Refactor FalconCLI to make it more manageable(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1472 Improvements in SLA service(Ajay Yadava)
-
-    FALCON-438 Auto generate documentation for REST API(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1483 Add Utils to common to support native scheduler(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1417 Make validity end date optional for feed / process(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1434 Enhance schedule API to accept key-value properties(Pallavi Rao)   
-
-    FALCON-1426 Add documentation for Falcon Email notification (Peeyush Bishnoi via Pallavi Rao)
-
-    FALCON-1425 Provide Email based plugin to send Notification once instance completed(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1205 SLAService to keep track of missing SLAs for feeds(Ajay Yadava)
-
-    FALCON-1449 Move getEntityProperties method to EntityUtil.(Ajay Yadava)
-
-    FALCON-1357 Update CHANGES.txt to change 0.7 branch to release.(Ajay Yadava)
-
-    FALCON-1414 Add all fields in filterBy to the entity list output.(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1430 Exclude designer from source tarball(Ajay Yadava)
-
-    FALCON-348 Add shutdown hook for Falcon (Sandeep Samudrala via Pallavi Rao)
- 
-  OPTIMIZATIONS
-    FALCON-1403 Revisit IT cleanup and teardown(Narayan Periwal via Pallavi Rao)
-
-  BUG FIXES
-    FALCON-1579 post-processing action fails with javax.servlet.jsp.el.ELException (Sowmya Ramesh via Pallavi Rao)
-
-    FALCON-1563 Old feed instances get deleted from SLA monitoring on feed update (Ajay Yadava).
-
-    FALCON-1560 Lifecycle does not allow feed with frequency greater than days(1) (Ajay Yadava).
-
-    FALCON-1556 Falcon build fails when building with hivedr profile(Sowmya Ramesh)
-
-    FALCON-1558 Sla API shows pending instance although _SUCCESS directory is present(Pragya Mittal via Ajay Yadava).
-
-    FALCON-1555 succeeded-post-processing action fails in Oozie uber mode (Pragya Mittal via Pallavi Rao)
-
-    FALCON-1551 Implement setWorkingDir Method in JailedFileSystem(Pavan Kolamuri via Pallavi Rao)
-
-    FALCON-1541 Bundle deploy.properties while packaging falcon (Pragya Mittal via Pallavi Rao)
-
-    FALCON-1530 SLAMonitoring API is not honouring delete feature(Ajay Yadava).
-
-    FALCON-1532 Incorrect documentation in SLA monitoring(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1466 Flaky unit tests in Proxy user feature(Sowmya Ramesh)
-
-    FALCON-1518 Temporarily comment getTotalEnqueueCount in JMSMessageConsumerTest(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1514 Incorrect logging while submitting cluster(Sandeep Samudrala via Pallavi Rao)
-
-    FALCON-1509 Auto generation of REST API does not work in distributed mode(Narayan Periwal via Sowmya Ramesh)
-
-    FALCON-1491 Update ActiveMQ version to 5.12(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1487 In secure cluster setup Hcat process/feed scheduling or replication fails(Sowmya Ramesh)
-
-    FALCON-1339 List feed entities shows scheduled Feed entities as submitted(Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1499 Lifecycle Retention Issues(Ajay Yadava)
-
-    FALCON-1469 Flaky EntityManagerJerseyIT#testDuplicateSubmitCommands(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1470 HiveDRStatusStoreTest should fail when using fakeGroup to create StatusStore(Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1462 Modify use of "testuser" in UTs and ITs(Narayan Periwal via Sowmya Ramesh)
-
-    FALCON-1484 Find Bundles fails in case of hadoop namenode moving to HA(Sandeep Samudrala via Pallavi Rao)
-
-    FALCON-1343 Fix validation of read/write endpoints in ClusterEntityParser(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1373 HiveDR does not work when job is run on destination cluster(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1401 MetadataMappingService fails to add an edge for a process instance(Pallavi Rao) 
-
-    FALCON-1465 Cluster submission fails with java.lang.IllegalArgumentException in distributed mode(Ajay Yadava via Sowmya Ramesh)
-
-    FALCON-1331 Update Failed for an entity and further updates are not possible as lock is not released(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1408 Add more logging information for failing ClusterEntityValidationIT(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1442 Contract of WorkflowEngine API broken(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1460 Move getHiveCredentials method to ClusterHelper(Ajay Yadava via Sowmya Ramesh)
-
-    FALCON-1342 Do not allow duplicate properties in entities(Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1461 NPE in DateValidator validate(Raghav Kumar Gautam via Sowmya Ramesh)
-
-    FALCON-1446 Flaky TaskLogRetrieverYarnTest(Narayan Periwal via Pallavi Rao)
-
-    FALCON-1443 NPE in shutdown hook(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1445 Docfix for api/instance/dependencies endpoint(Raghav Kumar Gautam via Ajay Yadava)
-
-    FALCON-1381 The webservice response of Falcon includes a stack trace in case of errors(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1438 Falcon RestAPI - In case of error falcon responds with FalconWebException::null(Balu Vellanki via Ajay Yadava).
-
-    FALCON-1415 Hive DR process fail if there are no events available from source table(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1371 Status of scheduled Process entity is shown as submitted in corner case(Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1402 Validate cmd throws NPE when source cluster and any one of target cluster doesn't have overlapping dates(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1365 HCatReplication job fails with AccessControlException(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-298 Feed update with replication delay creates holes(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1410 Entity submit fails when multiple threads try submitting same definition(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1429 Fix Falcon monitoring, alert, audit and monitoring plugins by fixing aspectj handling(Venkat Ranganathan via Ajay Yadava)
-
-    FALCON-1416 Add ACL (if missing) during touch(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1413 Filter By throws wrong error in case of wrong argument being passed(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1421 checkColos should be a no-op for embedded mode(Venkat Ranganathan via Ajay Yadava)
-
-  
-Release Version: 0.7
-  FALCON-1424 Update changes.txt for 0.7 branch(Ajay Yadava)
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-    FALCON-1188 Falcon support for Hive Replication(Venkat Ranganathan via Ajay Yadava)
-
-    FALCON-1297 Falcon Unit which supports Submit and Schedule of jobs(Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1039 Add instance dependency API in falcon (Ajay Yadava)
-
-    FALCON-796 Enable users to triage data processing issues through falcon (Ajay Yadava)
-    
-  IMPROVEMENTS
-    FALCON-1432 Fix licenses in LICENSES.txt(Ajay Yadava)
-
-    FALCON-1378 Falcon documentation lacks information on how to run Falcon on standalone Oozie/Hadoop setup(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1419 Document ability to disable oozie dryRun during entity schedule actions(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1174 Ability to disable oozie dryrun while scheduling or updating the falcon entity(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1374 Remove the cap on numResults(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1379 Doc describes retention incorrectly(Ajay Yadava)
-
-    FALCON-1359 Improve output format for Feed Instance Listing(Ajay Yadava)
-
-    FALCON-1368 Improve Falcon server restart time(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1361 Default end date should be now(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1362 Colo option shouldn't be mandatory(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1360 Size of feed should be humanized in feed instance listing(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1301 Improve documentation for Installation(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1322 Add prefix in runtime.properties(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1317 Inconsistent JSON serialization(Ajay Yadava)
-
-    FALCON-1324 Pagination API breaks backward compatibility(Ajay Yadava).
-
-    FALCON-668 FeedReplicator improvement to include more DistCP options(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-1320 Adding equals() and hashCode() method in LineageGraphResult.Edge(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1139 Validation issues in Falcon UI(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1204 Expose default configs for feed late data handling in runtime.properties(Balu Vellanki via Ajay Yadava)
-
-    FALCON-1170 Falcon Native Scheduler - Refactor existing workflow/coord/bundle builder(Pallavi Rao via Ajay Yadava)
-    
-    FALCON-1031 Make post processing notifications to user topics optional (Pallavi Rao via Ajay Yadava)
-    
-    FALCON-1186 Add filtering capability to result of instance summary (Suhas Vasu)
-
-    FALCON-1293 Update CHANGES.txt to change 0.6.1 branch to release (Shaik Idris Ali via Ajay Yadava)
-
-    FALCON-1116 Rule for Oozie 4+ doesn't match 5+ versions (Ruslan Ostafiychuk)
-
-    FALCON-1114 Oozie findBundles lists a directory and tries to match with the bundle's appPath
-    (Pallavi Rao via Ajay Yadava)
-
-    FALCON-1207 Falcon checkstyle allows wildcard imports(Pallavi Rao via Ajay Yadava)
-    
-    FALCON-1147 Allow _ in the names for name value pair(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-676 Enable metrics for Titan(Ajay Yadava)
-
-    FALCON-1060 Handle transaction failures in Lineage(Pavan Kumar Kolamuri via Ajay Yadava)
-    
-    FALCON-1212 Remove dependency on Gremlin (Ajay Yadava via Suhas Vasu)
-
-    FALCON-1211 Source tarball are not generated in mvn assembly when profile is distributed
-    (Shaik Idris Ali)
-
-    FALCON-1219 Minor doc fix(Raghav Kumar Gautam via Ajay Yadava)
-
-    FALCON-1059 Lineage should support bootstrapping(Ajay Yadava)
-
-    FALCON-75 Falcon CLI for deleting entities should inform user if entity does not exist(Ajay Yadava)
-
-    FALCON-1127 Falcon entity status is submitted when corresponding Bundle
-    is runningwitherror(Pallavi Rao via Suhas Vasu)
-
-    FALCON-1142 ClusterEntityParserTest.testClusterWithOnlyStaging
-    fails when run alone(Pallavi Rao via Suhas Vasu)
-
-    FALCON-1152 Names of Xml attributes and Class member variables
-    are inconsistent(Ajay Yadava)
-
-  OPTIMIZATIONS
-    FALCON-1228 testSubscriber test of JMSMessageConsumerTest fails at a particular scenario
-    (Suhas Vasu)
-
-  BUG FIXES
-    FALCON-1203 2 validateACL UTs broken(Narayan Periwal via Ajay Yadava)
-
-    FALCON-1423 HDFS recipe broken: Unrecognized option: -counterLogDir(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-1312 Falcon post processing action should use Oozie prepared configuration(Venkat Ranganathan via Ajay Yadava)
-
-    FALCON-1038 Log mover fails for map-reduce action(Peeyush Bishnoi via Ajay Yadava)
-    
-    FALCON-1412 Process waits indefinitely and finally timedout even though missing dependencies are met(Pallavi Rao via Ajay Yadava)
-
-    FALCON-1409 Update API throws NullPointerException(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1407 Temporarily disable failing tests in ClusterEntityValidationIT(Ajay Yadava)
-
-    FALCON-1392 FalconUnit tests fail(Pavan Kumar Kolamuri via Ajay Yadava)
-  
-    FALCON-1344 EntityGraph returns null in list of dependent entities(Ajay Yadava)
-    
-    FALCON-1330 When multiple cluster definitions exist for the same colo, triage produces unexpected results(Ajay Yadava)
-
-    FALCON-1399 Property for default number of results is not loaded dynamically(Ajay Yadava)
-
-    FALCON-1398 CrossEntityValidations contains incorrect validations(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1396 Disable the faulty test(Ajay Yadava via Sowmya Ramesh)
-
-    FALCON-1251 FeedEvictor UT fails intermittently(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1383 MetadataMappingServiceTest#testLineageForTransactionFailure fails randomly(Sandeep Samudrala via Ajay Yadava)
-
-    FALCON-1380 Triage Result throws incorrect error message on prism for an invalid process instance(Ajay Yadava)
-
-    FALCON-1310 Falcon build fails with Oozie-4.2.0(Peeyush Bishnoi via Sowmya Ramesh)
-
-    FALCON-1369 Trunk build is broken(Ajay Yadava via Sowmya Ramesh)
-
-    FALCON-1363 Fix retry policy example in documentation(Peeyush Bishnoi via Ajay Yadava)
-
-    FALCON-1327 When using triage on a server for a process which does not exist on that server,
-    a NullPointerException is encountered(Ajay Yadava)
-
-    FALCON-1325 Triage API on prism, for an instance at which a process does not exist sends incorrect message(Ajay Yadava)
-
-    FALCON-1328 Error in Triage documentation(Karishma Gulati via Ajay Yadava)
-
-    FALCON-1323 Reverse lookup of feeds causes NPE(Ajay Yadava)
-
-    FALCON-1329 Falcon's idempotent behaviour breaks in some cases(Ajay Yadava)
-
-    FALCON-1282 Incorrect hdfs servers property for feed replication in secured environment(Venkat Ranganathan via Ajay Yadava)
-
-    FALCON-1311 Instance dependency API produces inconsistent results in some scenarios(Pragya Mittal via Ajay Yadava)
-
-    FALCON-1268 Instance Dependency API failure message is not intuitive in distributed mode (Ajay Yadava)
-
-    FALCON-1260 Instance dependency API produces incorrect results (Ajay Yadava)
-    
-    FALCON-99 Adding late data to process doesn't create new coord (Pallavi Rao via Suhas Vasu)
-
-    FALCON-1101 Cluster submission in falcon does not create an owned-by edge(Sowmya Ramesh via Ajay Yadava)
-
-    FALCON-1104 Exception while adding process instance to graphdb when feed has partition expression
-    (Pavan Kumar Kolamuri via Ajay Yadava)
-
-    FALCON-1252 The parameter "tagkey" should be "tagkeys" in EntityList and FalconCLI twiki
-    (Ying Zheng via Sowmya Ramesh)
-
-    FALCON-1244 numResults query param in listInstances is ignored when start and end params are not specified
-    (Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1121 Backend support for free-text entity search
-    (Ying Zheng via Sowmya Ramesh)
-
-    FALCON-1051 Lineage apis broken for falcon setup in distributed mode
-    (Pavan Kumar Kolamuri via Ajay Yadava)
-    
-    FALCON-1165 Falcon restart failed, if defined service in cluster entity is unreachable
-    (Peeyush Bishnoi via Ajay Yadava)
-     
-    FALCON-1195 ClusterEntityParserTest fails intermittently
-    (Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-1162 Cluster submit succeeds when staging HDFS dir does not have
-    777 (ALL) permission (Venkat Ramachandran via Sowmya Ramesh)
-
-    FALCON-1153 Instance kill fails intermittently
-    (Balu Vellanki via Sowmya Ramesh)
-
-    FALCON-954 Secure Kerberos setup : Falcon should periodically revalidate 
-    auth token (Balu Vellanki via Ajay Yadava)
-
-    FALCON-1146 feed retention policy deleted everything all the way up
-    to the root (Peeyush Bishnoi via Suhas Vasu)
-
-    FALCON-1129 In a secure cluster, feed replication fails because of
-    Authentication issues (Venkat Ranganathan via Suhas Vasu)
-
-    FALCON-1141 Reverse Lookup for feed in prism fails with BadRequest
-    (Ajay Yadava via Suhas Vasu)
-
-    FALCON-1143 Correcting order of entities on reload
-    (Ajay Yadava via Suhas Vasu)
-
-    FALCON-1144 Dynamic partitions not getting registered in Hcat
-    (Suhas Vasu)
-
-Release Version: 0.6.1
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-   FALCON-1091 Monitoring plugin that registers catalog partition - code
-   (Suhas Vasu / Pallavi Rao / Shwetha GS via Suhas Vasu)
-
-   FALCON-790 Falcon UI to enable entity/process/feed edits and 
-   management. (Armando Reyna/Kenneth Ho via Srikanth Sundarrajan)
-
-   FALCON-949 Force update feature (pavan kumar kolamuri via Suhas Vasu)
-
-   FALCON-822 Add reverse look up API (Ajay Yadava via Suhas Vasu)
-
-  IMPROVEMENTS
-   FALCON-1280 Update docs/license licenses with right copyright
-   information (Shaik Idris Ali)
-
-   FALCON-1276 Verify licensing in html5-ui module. 
-   (Shaik Idris Ali)
-
-   FALCON-1245 Verify licensing and update license.txt in falcon-ui module
-   (Shaik Idris Ali)
-
-   FALCON-1220 Exclude downloaded directories from falcon-ui modules
-   (Shaik Idris Ali)
-
-   FALCON-1182 Remove reference to com.google.code.findbugs:annotations(Shaik Idris Ali)
-
-   FALCON-1158 Rename the parameter "pattern" in LIST API to "nameseq"
-   (Ying Zheng via Suhas Vasu)
-
-   FALCON-1142 ClusterEntityParserTest.testClusterWithOnlyStaging
-   fails when run alone(Pallavi Rao via Suhas Vasu)
-
-   FALCON-1128 Adding getter methods to FeedLookupResult
-   (Karishma Gulati via Samarth)
-
-   FALCON-1086 Support execution-order of a feed to be overridden
-   for replication coord (Shaik Idris Ali via Suhas Vasu)
-
-   FALCON-915 Failed to load data. Error: 400 Param user.name
-   can't be empty - Web UI/0.6 (Balu Vellanki via Suhas Vasu)
-
-   FALCON-933 misleading error msg on cluster submit failure
-   (Sowmya Ramesh via Suhas Vasu)
-
-   FALCON-910 Better error messages when creating cluster's directories
-   (karan kumar via Suhas Vasu)
-
-   FALCON-1042 Misleading message received while performing touch operation
-   on scheduled entity (Suhas Vasu)
-
-   FALCON-1074 EntityUtil getInstanceSequence is buggy (Ajay Yadava via Suhas Vasu)
-
-   FALCON-1024 Updating tags and pipeline elements of the feed/process definition
-   resubmits a new Oozie bundle (Pallavi Rao via Suhas Vasu)
-
-   FALCON-555 API response from instance "params" having multiple "entry" key
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-1003 Update checkstyle to remove DoubleCheckedLocking
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-1012 Add support for MapR-FS (Roman Slysh via Suhas Vasu)
-
-   FALCON-774 Discontinue support for JDK 1.6 and move to 1.7. (Peeyush 
-   Bishnoi via Srikanth Sundarrajan)
-
-   FALCON-263 Adding documentation for params api (Ajay Yadav via Srikanth 
-   Sundarrajan) 
-
-   FALCON-237 falcon feed replication should honour availability flag (Peeyush
-   Bishnoi via Srikanth Sundarrajan)
-
-   FALCON-417 Upgrade Hive and HCatalog to latest stable version. (Peeyush Bishnoi
-   via Srikanth Sundarrajan)
-
-   FALCON-896 Upgrade oozie to 4.1.0 and make it the default for falcon. (Peeyush
-   Bishnoi via Srikanth Sundarrajan)
-
-   FALCON-749 Improve error printed in falcon CLI. (Suresh Srinivas via Shwetha GS)
-
-   FALCON-977 Drop incubator/incubating from falcon codebase. (Shwetha GS
-   via Srikanth Sundarrajan)
-
-   FALCON-952 If hadoop home is set, use $HADOOP_HOME to set falcon classpath
-   (Balu Vellanki via Suhas Vasu)
-
-   FALCON-935 Feed and process late rerun failed in falcon trunk
-   (Shaik Idris Ali via Suhas Vasu)
-
-   FALCON-914 Add option to search for Entities. (Ajay Yadav via Srikanth
-   Sundarrajan) 
-
-   FALCON-256 Create new API for Process dependency graph DAG which captures 
-   process connected via feeds. (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-823 Add path matching ability to the radix tree (Ajay Yadav
-   via Srikanth Sundarrajan) 
-
-   FALCON-329 Falcon client methods should return objects. (Samar via Shwetha GS)
-
-   FALCON-593 Preserve data type for properties in a vertex. (Ajay
-   Yadav via Srikanth Sundarrajan)
-
-   FALCON-785 Document upgrade path from 0.5 to 0.6. (Balu Vellanki 
-   via Srikanth Sundarrajan)
-
-   FALCON-877 Pagination API should have a cap on number of results 
-   returned (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-734 Document when to use Distributed Mode vs. Embedded Mode
-   (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-805 Create store to store feed properties like name against 
-   it's path (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-807 Fix order of actual and expected expression in assert 
-   statements in webapp module (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-784 Update release notes in CHANGES.txt in branch and trunk 
-   (Venkatesh Seetharam)
-
-   FALCON-782 Update trunk version to 0.7-incubating-SNAPSHOT (Venkatesh 
-   Seetharam)
-
-  OPTIMIZATIONS
-   FALCON-1071 Feed Listing API default behaviour for start and end
-   not handled properly. (Suhas Vasu)
-
-   FALCON-1063 Falcon CLI list entities operation throws NullPointerException
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-987 params command inconsistent behavior
-   (pavan kumar kolamuri via Suhas Vasu)
-
-   FALCON-944 Parallel update APIs create 2 coords (Suhas Vasu)
-
-   FALCON-943 process update copying user lib is very slow. (Shwetha G S)
-
-   FALCON-419 Update deprecated HCatalog API to use Hive Metastore API.
-   (Shwetha GS)
-
-   FALCON-423 Updating falcon server endpoint in distributed setup doesn't 
-   work. (Srikanth Sundarrajan)
-
-   FALCON-913 Change the default values of log clean up services
-   (Suhas Vasu)
-
-  BUG FIXES
-   FALCON-1238 Add apache license header to .less css in falcon-ui/app
-   (Shaik Idris Ali)
-
-   FALCON-1224 cannot build falcon - codehaus close service and repo - 
-   fail error 503 (Suhas Vasu)
-
-   FALCON-1161 Test case feedFeedBasePathExists fails intermittently
-   (Suhas Vasu)
-
-   FALCON-1154 Replication is broken (Sowmya Ramesh via Suhas Vasu)
-
-   FALCON-1159 Falcon build is broken (Suhas Vasu)
-
-   FALCON-1146 feed retention policy deleted everything all the way up
-   to the root (Peeyush Bishnoi via Suhas Vasu)
-
-   FALCON-1129 In a secure cluster, feed replication fails because of
-   Authentication issues (Venkat Ranganathan via Suhas Vasu)
-
-   FALCON-1141 Reverse Lookup for feed in prism fails with BadRequest
-   (Ajay Yadava via Suhas Vasu)
-
-   FALCON-1143 Correcting order of entities on reload
-   (Ajay Yadava via Suhas Vasu)
-
-   FALCON-1144 Dynamic partitions not getting registered in Hcat
-   (Suhas Vasu)
-
-   FALCON-1132 Falcon trunk is broken due to failure in IT test
-   (Ajay Yadava via Suhas Vasu)
-
-   FALCON-1097 UT failure for ExpressionHelperTest
-   (Ajay Yadava via Shaik Idris)
-
-   FALCON-1125 Feed Lookup API doesn't work via prism
-   (Ajay Yadava via Suhas Vasu)
-
-   FALCON-1119 Instance logs option is not returning the log location
-   (Suhas Vasu)
-
-   FALCON-1100 UI : Failed to load data. 404 not found
-   (Sowmya Ramesh via Suhas Vasu)
-
-   FALCON-1123 Stacktrace printed by Falcon CLI is not useful to user
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-1117 Falcon Client throws NPE on no empty result in
-   reverse lookup (Ajay Yadava via Suhas Vasu)
-
-   FALCON-1099 FeedReplicator: throws NPE (Sowmya Ramesh via Suhas Vasu)
-
-   FALCON-1064 Build hangs at the following test: JMSMessageProducerTest
-   (Sowmya Ramesh via Suhas Vasu)
-
-   FALCON-1070 numResults and offset do not work for instance operations
-   (Suhas Vasu)
-
-   FALCON-1025 lastWeek and CurrentWeek functions are getting
-   evaluated incorrectly (pavan kumar kolamuri via Suhas Vasu)
-
-   FALCON-1020 validate command produces different results when run
-   via prism and server (pavan kumar kolamuri via Suhas Vasu)
-
-   FALCON-950 Rerun does not work on succeeded instances (Suhas Vasu)
-
-   FALCON-1048 Incorrect documentation for feed instance listing api. (Suhas
-   Vasu via Srikanth Sundarrajan)
-
-   FALCON-1057 recipes.twiki should be renamed Recipes.twiki for consistency 
-   and to fix a dead link (Aaron Dossett via Srikanth Sundarrajan)
-   
-   FALCON-1052 pipeline feature is not able to group by through CLI
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-1047 Feed Instance Listing doesn't return any output or error
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-1050 Falcon rerun is broken for timed out instances
-   (Suhas Vasu)
-
-   FALCON-1045 Falcon rerun is broken in trunk (Suhas Vasu)
-
-   FALCON-973 Add LogMover Service for yarn
-   (pavan kumar kolamuri via Suhas Vasu)
-
-   FALCON-993 Falcon Oozie adaptor test case failed with umask issue
-   (Peeyush Bishnoi via Srikanth Sundarrajan)
-
-   FALCON-1000 ITs fail with NoSuchFieldError: INSTANCE. (Shwetha GS)
-
-   FALCON-109 submission of clusters for non registered colos 
-   (Pallavi Rao via Suhas Vasu)
-
-   FALCON-995 Sharelib directory does not exist in webapp
-   (Peeyush Bishnoi via Suhas Vasu)
-
-   FALCON-984 Falcon build is broken. (Peeyush Bishnoi via Srikanth
-   Sundarrajan)
-
-   FALCON-945 Fix broken lineage feature in Web-UI. (Adam Kawa via Srikanth
-   Sundarrajan)
-
-   FALCON-921 Validate command is exposed only through prism. (Ajay Yadav 
-   via Shwetha GS)
-
-   FALCON-916 Falcon idempotency is being violated during delete. (Balu
-   Vellanki via Srikanth Sundarrajan)
-
-   FALCON-917 Fix corner cases while getting job id during instance status
-   (Suhas Vasu) 
-
-
-Release Version: 0.6-incubating
-
-   FALCON-785 Document upgrade path from 0.5 to 0.6 (Venkatesh Seetharam)
-
-  INCOMPATIBLE CHANGES
-   FALCON-753 Change the ownership for staging dir to user submitting the feed
-   (Venkatesh Seetharam)
-
-  NEW FEATURES
-   FALCON-722 Add SLA for processes (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-721 Add SLA for Feeds (Ajay Yadav via Suhas Vasu)
-
-   FALCON-687 Add hooks for extensions in Audit (Venkatesh Seetharam)
-
-   FALCON-636 Add a sample recipe for disaster recovery of hdfs dirs/files
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-635 Add recipe option in falcon client (Sowmya Ramesh via
-   Venkatesh Seetharam)
-
-   FALCON-615 Add pipeline element to lineage graph
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-614 Add pipeline element to process entity
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-588 Baselining designer code. (samar via Shwetha GS)
-
-   FALCON-400 Add Authorization for Entities (Venkatesh Seetharam)
-      FALCON-279 Add ACL for Cluster Entity
-      FALCON-462 Add ACL for process entity
-      FALCON-463 Validate Feed ACL only if authorization is enabled
-      FALCON-464 Enforce Authorization for REST API
-      FALCON-468 Add User Documentation for authorization feature
-      FALCON-557 Add super-user who is authorized for all
-
-   FALCON-466 REST APIs must add the entity owner as an implicit filter (Balu Vellanki
-   via Venkatesh Seetharam)
-
-   FALCON-263 API to get workflow parameters. (pavan kumar kolamuri via Shwetha GS)
-
-  IMPROVEMENTS
-   FALCON-320 Whitespace has not been taken into consideration in runtime.properties
-
-   FALCON-766 Falcon workflow rerun by default should rerun only Failed nodes
-   and remove continue option (Shaik Idris Ali via Venkatesh Seetharam)
-
-   FALCON-744 Document existing falcon data archival to cloud store
-   (Venkatesh Seetharam)
-
-   FALCON-869 Clean up required in the instance API calls. (Suhas Vasu)
-
-   FALCON-878 Move falcon html files to HTML5. (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-145 Feed eviction should be implemented in the appropriate Storage
-   implementation. (Ajay Yadav via Srikanth Sundarrajan)
-
-   FALCON-813 Expose job id for running jobs in Falcon (Suhas Vasu)
-
-   FALCON-834 Propagate request id in the response to help trace and debug
-   failures in merlin (Venkatesh Seetharam)
-
-   FALCON-828 Add hadoop version info as part of falcon admin version API
-   (Venkatesh Seetharam)
-
-   FALCON-762 Support feed listing for file system storage (Srikanth Sundarrajan)
-
-   FALCON-20 Remove dependency on custom InMobi DistCp (Sowmya Ramesh via
-   Venkatesh Seetharam)
-
-   FALCON-758 Discontinue support for Oozie-3.x (Peeyush Bishnoi via
-   Venkatesh Seetharam)
-
-   FALCON-757 Discontinue support for Hadoop-1.x (Sowmya Ramesh via
-   Venkatesh Seetharam)
-
-   FALCON-748 Falcon throws '413 Full Head' error message when kerberos is
-   enabled with AD (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-767 Upgrade Hadoop from 2.4 to 2.5 (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-651 Typos in Falcon Documentation (Ajay Yadav via Venkatesh Seetharam)
-
-   FALCON-576 Update Installation notes for Mac OS (Ajay Yadav via
-   Venkatesh Seetharam)
-
-   FALCON-666 Add Alerts for unrecoverable failures (Venkatesh Seetharam)
-
-   FALCON-665 Handle message consumption failures in JMSMessageConsumer
-   (Venkatesh Seetharam)
-
-   FALCON-662 Fetch relationships for a given type API (Balu Vellanki via
-   Venkatesh Seetharam)
-
-   FALCON-661 Add list types to Lineage API (Balu Vellanki via
-   Venkatesh Seetharam)
-
-   FALCON-654 Exclude junit dependency in pom (Ruslan Ostafiychuk)
-
-   FALCON-640 Add ability to specify sort order for orderBy param in RestAPI
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-594 Process lineage information for Retention policies
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-325 Process lineage information for Replication policies
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-474 Add Bulk APIs to drive the dashboard needs (Balu Vellanki via
-   Venkatesh Seetharam)
-
-   FALCON-166 Instance status start and end dates are rigid and inconvenient
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-612 Create CHANGES.txt for falcon-regression(Arpit Gupta via SamarthG)
-
-   FALCON-470 Add support for pagination, filter by, etc. to Entity and
-   Instance List API (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-327 - Simplify message passing framework (Venkatesh Seetharam)
-    FALCON-484 - Refactor workflow related code into context, listener and Notification
-    FALCON-485 - Simplify JMS Message Sender/Consumer and use Workflow Context
-    FALCON-486 - Introduce Workflow Context in Post Processing
-    FALCON-487 -Introduce Workflow Context in Rerun framework
-    FALCON-488 - Introduce Workflow Context in Lineage Service
-    FALCON-492 - Refactor workflow listener implementation init config driven
-
-   FALCON-554 Extend time functions available in Falcon to support current & 
-   last week (Contributed by Srikanth Sundarrajan)
-
-   FALCON-369 Refactor workflow builder. (Shwetha GS)
-
-   FALCON-280 Validate the ACL in Feed entity with the user submitting the entity
-   (Jean-Baptiste Onofré via Venkatesh Seetharam)
-
-   FALCON-445 Propagate hive credentials defined in cluster entity to
-   hive-site.xml (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-133 Upgrade to slf4j 1.7.5 and use SLF4J logger. (Jean-Baptiste Onofré
-   via Shwetha GS)
-
-   FALCON-353 enable dry run feature of oozie for schedule 
-   and update (Shwetha GS via Suhas Vasu)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-   FALCON-829 Better error message for cluster submission failure
-   (Karan Kumar via Venkatesh Seetharam)
-
-   FALCON-824 Remove usage of Hadoop incompatible API
-   (Shwetha G S via Venkatesh Seetharam)
-
-   FALCON-894 Cluster submission with hive registry fails in secure setup
-   (Venkatesh Seetharam)
-
-   FALCON-892 HCatReplication fails in secure setup (Venkatesh Seetharam)
-
-   FALCON-889 Windows azure replication fails with "wasb" as the scheme to an
-   HDFS file system (Chris Nauroth via Venkatesh Seetharam)
-
-   FALCON-885 RequestID is coming as null for all instance API calls
-   (Venkatesh Seetharam)
-
-   FALCON-880 Oozie Java actions for hive tables fail in secure mode
-   (Venkatesh Seetharam)
-
-   FALCON-717 Shutdown not clean for JMSMessageConsumer
-   (Shaik Idris Ali via Venkatesh Seetharam)
-
-   FALCON-875 Entity Summary endpoint filterBy does not filter entities
-   without pipelines (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-874 Rest API - Instance and Entity orderBy param values should be
-   case insensitive (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-820 Fix minor nits with HadoopClientFactory
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-815 Rename mapBandwidthKB to mapBandwidth in DistCp
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-595 Improvements to DefaultAuthorizationProvider code
-   (Raghav Kumar Gautam via Venkatesh Seetharam)
-
-   FALCON-868 Rerun command incorrect in falcon CLI documentation
-   (Karishma Gulati via Venkatesh Seetharam)
-
-   FALCON-864 Falcon superuser is unable to delete scheduled feed
-   (Venkatesh Seetharam)
-
-   FALCON-862 Falcon entity Rest API - filter by tags also returns entities
-   that do not have tags (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-795 Maven Enforcer plugin is throwing error while building Falcon
-   (Peeyush Bishnoi via Venkatesh Seetharam)
-
-   FALCON-859 EntitySummary RestAPI : Change pathParam "cluster" to queryParam
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-858 entity/instance Rest API - make keys for filterBy param case
-   insensitive (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-857 Authorization failure results in internal server error
-   (Venkatesh Seetharam)
-
-   FALCON-850 Cluster summary UI page results in 400 Bad Request
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-851 Super user authorization is broken (Venkatesh Seetharam)
-
-   FALCON-840 Possible NPE in filteredInstanceSet method of
-   AbstractInstanceManager (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-839 Authorization succeeds with invalid acl owner based on group
-   membership (Venkatesh Seetharam)
-
-   FALCON-831 Operation on non existing entity throws internal server error
-   (Venkatesh Seetharam)
-
-   FALCON-710 start/end is optional for lifecycle operations (Balu Vellanki
-   via Venkatesh Seetharam)
-
-   FALCON-819 Submission of cluster with registry interface fails on secure
-   setup (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-804 Remove Oozie 3.* patch files from Falcon (Peeyush Bishnoi via
-   Venkatesh Seetharam)
-
-   FALCON-803 Instances displayed on Dashboard entity page should be sorted
-   by startTime (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-799 Falcon Dashboard unusable when server is started with umask 077
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-678 Falcon's default port has changed to 15443 (Balu Vellanki via
-   Venkatesh Seetharam)
-
-   FALCON-482 Concurrent requests made using Proxy Oozie client fail
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-764 Falcon retry of failed process/feed instances broken during restart
-   (Shaik Idris via Suhas Vasu)
-
-   FALCON-800 Falcon lineage is not working (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-792 Integration test EntityManagerPaginationJerseyIT fails
-   occasionally (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-787 FalconCLI - Submit recipe failed (Sowmya Ramesh via
-   Venkatesh Seetharam)
-
-   FALCON-786 FalconAuditFilter - Arguments in wrong order (Venkatesh Seetharam)
-
-   FALCON-789 Post processing is broken for Feeds (Sowmya Ramesh via
-   Venkatesh Seetharam)
-
-   FALCON-788 Lineage: Minor bug fixes (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-777 UT intermittent failures in Messaging tests depending on Thread.sleep
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-773 Log clean up handlers only work in distributed mode
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-760 Messaging is broken for FALCON.ENTITY.TOPIC in case of Eviction
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-768 Change dashboard USER_ID to falcon-dashboard (Balu Vellanki via
-   Venkatesh Seetharam)
-
-   FALCON-759 Adding cluster entity fails when Yarn TimelineClient is enabled
-   (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-740 Entity kill job calls OozieClient.kill on bundle coord job ids
-   before calling kill on bundle job id (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-732 Lineage capture fails for an instance that's not generated by
-   falcon (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-731 Lineage capture for evicted instance is broken
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-724 Build fails as Integration test fails (Balu Vellanki via
-   Venkatesh Seetharam)
-
-   FALCON-728 Unit test failures on jenkins (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-727 JQuery license is incorrectly mentioned in the header file. (Srikanth Sundarrajan)
-
-   FALCON-702 JailedFileSystem does not work for relative paths
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-677 Feed data and stats path defaults to /tmp/. (Suhas Vasu via Shwetha GS)
-
-   FALCON-590 Update to ACLs added to process is not handled
-   (Venkatesh Seetharam)
-
-   FALCON-694 StringIndexOutOfBoundsException while updating graph DB for
-   replicated instance (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-695 Lineage: "stored-in" edge is added between feed entity and
-   target cluster (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-669 Missing optional workflow execution listeners configuration
-   results in NPE (Raghav Kumar Gautam via Venkatesh Seetharam)
-
-   FALCON-644 Falcon message producer masks errors in Post processing
-   (Venkatesh Seetharam)
-
-   FALCON-338 - late data recording is enabled by default for all feeds
-   irrespective of late arrival config (Ajay Yadav via Suhas Vasu)
-
-   FALCON-652 EntityUtils tests are failing (Ajay Yadav via Venkatesh Seetharam)
-
-   FALCON-650 Instance list APIs occasionally fail when orderBy set to
-   starttime or endtime (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-649 Remove unnecessary validation for Instance start time in
-   FalconCLI (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-579 Lineage breaks if feed.xml doesn't have the date pattern in
-   feed path location (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-642 OozieProcessWorkflowBuilderTest test failures. (Shwetha GS)
-
-   FALCON-630 late data rerun for process broken in trunk. (Shwetha GS)
-
-   FALCON-611 Post process arg status is in 'FAILED' state always
-   (Shwetha GS via Suhas Vasu)
-
-   FALCON-622 Fix ProcessUpdate and update at specific time test
-   (Samarthg)
-
-   FALCON-616 cluster submission should fail when shared libs copy fail
-   (Shwetha GS via Suhas Vasu)   
-
-   FALCON-598 ProcessHelper throws NPE if the process has no inputs OR no
-   outputs defined (Balu Vellanki via Venkatesh Seetharam)
-
-   FALCON-583 Post processing is broken in current trunk
-   (Venkatesh Seetharam via Suhas Vasu)
-
-   FALCON-582 Latest changes to LICENSE files results in build failure
-   (Srikanth Sundarrajan via Venkatesh Seetharam)
-
-   FALCON-577 Extend time functions added in Falcon 0.6 to support current & last week
-   are giving UnsupportedOperationException. (Srikanth Sundarrajan via Shwetha GS)
-
-   FALCON-578 Table import & table export failing during hcat table replication
-   (Suhas Vasu)
-   
-   FALCON-571 user libs not getting loaded during process execution 
-   (Shwetha GS via Suhas Vasu)
-
-   FALCON-514 Falcon CLI giving error when using -file option with -rerun in 
-   instance management. (pavan kumar kolamuri via Shwetha GS)
-
-   FALCON-566 update throwing null job id exception. (Shwetha GS)
-
-   FALCON-561 Falcon jenkins failure in EntityManagerJerseyIT.
-   (Shwetha GS via Suhas Vasu)
-
-   FALCON-556 Feed Replication _SUCCESS is not getting created on target directory.
-   (pavan kumar kolamuri via Suhas Vasu)
-
-   FALCON-497 Able to submit feed even though owner of storage specified (location type=data) 
-   is different from the ACL owner. (Shwetha GS)
-
-   FALCON-357 HCatalog Feed replication: Hive export job fails when table partition 
-   contains multiple dated columns. (Satish Mittal via Shwetha GS)
-
-   FALCON-495 multi source single target feed replication failing in regression.
-   (Satish Mittal via Shwetha GS)
-
-   FALCON-494 update feature broken. (Shwetha GS via Suhas Vasu)
-
-   FALCON-496 Feed Replication with Empty Directories giving error. 
-   (pavan kumar kolamuri via Shwetha GS)
-
-   FALCON-483 Fix the failing test ConfigurationStoreTest.testConcurrentRemoves 
-   on jenkins. (Sowmya Ramesh via Shwetha GS)
-
-   FALCON-430 Process update with user (Shwetha GS via Venkatesh Seetharam)
-
-   FALCON-460 Concurrent deletion of same entity results in NPE (Sowmya Ramesh
-   via Venkatesh Seetharam)
-
-   FALCON-459 Lineage resource API fails with NPE for bad query params
-   (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-456 Custom DistCp conflict with core DistCp in container classpath
-   fails falcon workflows (Venkatesh Seetharam)
-
-   FALCON-446 Hive Replications fail because of permissions issue
-   (Venkatesh Seetharam)
-
-   FALCON-444 Logs dir for replication workflow is incorrect and jobs fail
-   with permission issues (Venkatesh Seetharam)
-
-   FALCON-443 Process with Hive workflow engine and filesystem input feeds,
-   table output feed fails (Sowmya Ramesh via Venkatesh Seetharam)
-
-   FALCON-382 Error While building Latest trunk code with Hadoop 2.2.0. (Suhas Vasu)
-
-   FALCON-240 Instance status from CLI on a feed doesn't give the retention details. 
-   (pavan kumar kolamuri via Shwetha GS)
-
-   FALCON-441 Lineage capture fails for feeds with multiple instances
-   (Venkatesh Seetharam)
-
-Release Version: 0.5-incubating
-  INCOMPATIBLE CHANGES
-    FALCON-11 Add support for security in Falcon (Venkatesh Seetharam)
-
-    FALCON-288 Persist lineage information into a persistent store
-    (Venkatesh Seetharam)
-
-  NEW FEATURES
-    FALCON-281 Design Action Interface. (Srikanth Sundarrajan)
-
-    FALCON-259 Design Transformation Interface. (Srikanth Sundarrajan)
-
-    FALCON-26 Pipeline Monitor addition. (Suhas Vasu via Shwetha GS)
-
-    FALCON-254 Bootstrap designer module. (Srikanth Sundarrajan via Shwetha GS)
-
-    FALCON-238 Support updates at specific time. (Shwetha GS)
-
-    FALCON-285 Support Lineage information capture (Venkatesh Seetharam)
-
-    FALCON-289 Provide REST APIs for discovering lineage metadata over the store.
-    (Venkatesh Seetharam)
-
-    FALCON-229 Enable SSL for Falcon REST API (Srikanth Sundarrajan via
-    Venkatesh Seetharam)
-   
-  IMPROVEMENTS
-    FALCON-453 possible LICENSE/NOTICE issues (Chris Douglas via Venkatesh Seetharam)
-
-    FALCON-450 Add jdk 1.6 requirement in release notes (Venkatesh Seetharam)
-
-    FALCON-449 Twiki documentation files may need ALv2 headers (Venkatesh Seetharam)
-
-    FALCON-448 File names in the release should have apache as the prefix
-    (Venkatesh Seetharam)
-
-    FALCON-439 Update the copyright in Notice and License files (Venkatesh Seetharam)
-
-    FALCON-324 Document lineage feature (Sowmya Ramesh via Venkatesh Seetharam)
-
-    FALCON-312 Falcon LogCleanupServiceTest seems to clean up root "/"
-    (Venkatesh Seetharam)
-
-    FALCON-379 Document limitations of Hive support in Falcon (Venkatesh Seetharam)
-
-    FALCON-421 Upgrade Hadoop to 2.4.0 (Sowmya Ramesh via Venkatesh Seetharam)
-
-    FALCON-386 Upgrade to oozie 4.0.1. (Sowmya Ramesh via Venkatesh Seetharam)
-
-    FALCON-392 HCat Example Recipes. (Shwetha GS via Venkatesh Seetharam)
-
-    FALCON-391 Add ability to set mapBandwidth. (Michael Miklavcic via Shwetha GS)
-
-    FALCON-380 The dependency option doesn't mention input or output for a feed.
-    (Suhas Vasu via Shwetha GS)
-
-    FALCON-365 Remove the checked in oozie xsds. (Shwetha GS)
-
-    FALCON-356 Merge OozieProcessMapper and OozieProcessWorkflowBuilder. (Shwetha GS)
-
-    FALCON-355 Remove SLAMonitoringService. (Shwetha GS)
-
-    FALCON-333 jsp-api dependency is defined twice. (Jean-Baptiste
-    Onofré via Shaik Idris)
-
-    FALCON-311 Several dead links in Falcon documentation.
-    (Suresh Srinivas via Venkatesh Seetharam)
-
-    FALCON-304 Simplify assembly for script in standalone and distributed
-    mode. (Suresh Srinivas via Venkatesh Seetharam)
-
-    FALCON-264 Update with effectiveTime in CLI. (Shwetha GS via Srikanth
-    Sundarrajan)
-
-    FALCON-60 Feed retention doesn't delete empty parent dirs. (Shaik Idris)
-
-    FALCON-247 Add scripts to check the status of falcon and prism. (Jean-Baptiste 
-    Onofré via Shwetha GS)
-
-    FALCON-245 POM should use Apache POM as parent. (Jean-Baptiste Onofré via 
-    Shwetha GS)
-
-    FALCON-252 Upgrade to json-simple 1.1.1. (Jean-Baptiste Onofré via 
-    Shwetha GS)
-
-    FALCON-233 Update hadoop 2 version to 2.2.0. (Venkatesh Seetharam
-    via Shwetha GS)
-
-    FALCON-64 Add example entity xmls in falcon package. (Shwetha GS)
-    
-    FALCON-66 Make oozie version change configurable. (Shwetha GS
-    via Srikanth Sundarrajan)
-    
-    FALCON-38 Falcon's parent workflow actions (pre-processing & post-processing)
-    should have multiple retries. (Shaik Idris)
-
-    FALCON-242 Post processing is not called in Retention workflows (Shaik Idris)
-
-    FALCON-286 Capture information in process entity about the user workflow
-    (Venkatesh Seetharam)
-
-    FALCON-287 Record lineage information in post processing (Venkatesh Seetharam)
-
-    FALCON-257 File system storage won't work with relative paths
-    (Venkatesh Seetharam)
-
-    FALCON-334 Add indexing to the graph property keys. (Venkatesh Seetharam)
-
-    FALCON-342 Add ability to set maxMaps on replication (Michael Miklavcic
-    via Venkatesh Seetharam)
-
-    FALCON-366 Add a REST API to get properties for a given vertex (Venkatesh Seetharam)
-
-    FALCON-370 Remove Graph dump option in CLI (Venkatesh Seetharam)
-
-    FALCON-367 Bump dagre and jquery version for the web UI (Haohui Mai
-    via Venkatesh Seetharam)
-
-    FALCON-290 Visualize lineage information on the dashboard (Haohui Mai
-    via Venkatesh Seetharam)
-
-    FALCON-371 Show vertex information in the web UI (Haohui Mai via Venkatesh Seetharam)
-
-    FALCON-373 Display lineage link only for jobs that are succeeded in the web UI
-    (Haohui Mai via Venkatesh Seetharam)
-
-    FALCON-393 Display error messages when the web UI fails to get the data from the server
-    (Haohui Mai via Venkatesh Seetharam)
-
-    FALCON-395 Falcon service does not start when _HOST is used in the spnego principal
-    (Venkatesh Seetharam)
-
-    FALCON-397 DistCp uses JobSubmissionFiles API which is incompatible between hadoop versions
-    (Venkatesh Seetharam)
-
-  OPTIMIZATIONS
-    FALCON-123 Improve build speeds in falcon. (Srikanth Sundarrajan via Shwetha GS)
-
-  BUG FIXES
-    FALCON-447 Remove the jar file form the source distribution (Venkatesh Seetharam)
-
-    FALCON-440 Exclude IDEA IntelliJ and other unnecessary files from source
-    distribution (Venkatesh Seetharam)
-
-    FALCON-437 Feed Replication workflows are failing (Venkatesh Seetharam)
-
-    FALCON-429 Falcon CLI entity list command throws NPE stack trace in case
-    of no entities (Satish Mittal via Venkatesh Seetharam)
-
-    FALCON-427 Update default falcon endpoint URL in runtime.properties for
-    prism setup to work out of box (Satish Mittal via Venkatesh Seetharam)
-
-    FALCON-425 src/bin/package.sh is broken for hadoop 2.* versions
-    (Sowmya Ramesh via Venkatesh Seetharam)
-
-    FALCON-426 Prism unable to connect to falcon server if falcon endpoint URL ends with
-    '/' character. (Satish Mittal via Shwetha GS)
-
-    FALCON-401 replicate hcat table to two targets can fail with export dir
-    exists on source (Sowmya Ramesh via Venkatesh Seetharam)
-
-    FALCON-364 Process specification contains a mistake in the parallel section
-    (Jean-Baptiste Onofré via Venkatesh Seetharam)
-
-    FALCON-398 Null pointer Exception on submitting HCat Feed (Sowmya Ramesh
-    via Venkatesh Seetharam)
-
-    FALCON-390 falcon HCatProcess tests are failing in secure clusters
-    (Venkatesh Seetharam)
-
-    FALCON-284 Hcatalog based feed retention doesn't work when partition filter spans across
-    multiple partition keys. (Satish Mittal via Shwetha GS)
-
-    FALCON-409 Not able to create a package. (Raju Bairishetti via Shwetha GS)
-
-    FALCON-396 minor logging typo in FalconTopicSubscriber. (Raghav Kumar Gautam via Shwetha GS)
-
-    FALCON-352 old properties not removed on refresh of runtime properties. (Shaik Idris)
-
-    FALCON-332 Oozie execution order is wrongly mapped in Falcon for LAST_ONLY. 
-    (Shaik Idris)
-
-    FALCON-374 Update with effective time in CLI. (Shwetha GS)
-
-    FALCON-98 starting embedded hadoop fails sometimes. (Shwetha GS)
-
-    FALCON-28 unable to submit/delete feed or process which had been attempted submit 
-    with wrong cluster. (Shwetha GS)
-
-    FALCON-269 Mistake in the embedded ActiveMQ port property. (Jean-Baptiste Onofré
-    via Shwetha GS)
-
-    FALCON-344 FeedEvictorTest. (Shaik Idris Ali via Shwetha GS)
-
-    FALCON-302 Issues with distributed package properties. (Shwetha GS)
-
-    FALCON-331 Summary API returning NPE if only start is passed. (Shwetha GS)
-
-    FALCON-323 Summary API deserialization fails if array has only one element.
-    (Shwetha GS via Shaik Idris)
-
-    FALCON-319 Summary instance API returning wrong exception. (Shwetha GS via 
-    Shaik Idris)
-
-    FALCON-321 Feed evictor deleting more stuff than it should. (Shaik Idris)
-
-    FALCON-330 Build fails for the module 'Apache Falcon CLI client ' on ubuntu.
-    (Deepak Marathe via Shwetha GS)
-
-    FALCON-297 Validations on update with effective time. (Shwetha GS)
-
-    FALCON-278 Changes in feed availability info doesn't update process. (Shwetha GS)
-
-    FALCON-239 Build failed on build-tools due to a missing SNAPSHOT. (Srikanth 
-    Sundarrajan)
-
-    FALCON-221 Logmover is not copying all action level logs. (Srikanth Sundarrajan)
-
-    FALCON-270 Checkstyle can not be run on a module. (Jean-Baptiste Onofré via 
-    Shwetha GS)
-
-    FALCON-260 When a process is scheduled, the user workflow is failing with 
-    OozieClientException. (Shwetha GS)
-
-    FALCON-268 Checkstyle/Findbugs issues on FalconCLI. (Jean-Baptiste Onofré via 
-    Shwetha GS)
-
-    FALCON-258 Falcon status throws an error when external jobids are missing 
-    (Suhas Vasu via Shaik Idris)
-
-    FALCON-262 Example files should use aligned dependency versions. (Jean-Baptiste Onofré
-    via Shwetha GS)
-
-    FALCON-249 Fix mistakes in docs/InstallationSteps.html. (Jean-Baptiste Onofré
-    via Shwetha GS)
-
-    FALCON-241 The job logs are not copied during Post-Processing step in Replication workflows.
-    (Suhas Vasu via Shwetha GS)
-
-    FALCON-106 Falcon compilation fails for hadoop-2 profile. (Shwetha GS
-    via Srikanth Sundarrajan)
-
-    FALCON-206 Process update for wf changes. (Shwetha GS)
-
-    FALCON-236 Falcon process output events which is optional should have at least one event 
-    if defined. (Shaik Idris Ali via Shwetha GS)
-
-    FALCON-228 Build failed on build-tools module. (Jean-Baptiste Onofré
-    via Shwetha GS)
-
-    FALCON-232 mvn clean verify fails with oozie war not found. (Shwetha GS)
-
-    FALCON-124 unable to schedule deleted feed. (Shwetha GS via Shaik Idris)
-
-    FALCON-226 Tests for subscribing/listening to multiple active-mq topics.  (Shaik Idris 
-    via Shwetha GS)
-
-    FALCON-336 Fix check style error - builds fail (Venkatesh Seetharam)
-
-    FALCON-354 Bug when MetadataMappingService is not configured as one of the
-    application services. (Venkatesh Seetharam)
-
-    FALCON-360 Lineage recording fails with NPE for processes with >1 inputs.
-    (Venkatesh Seetharam)
-
-    FALCON-378 Feed status fails with NPE post update when the new coordinator
-    is in PREP state (Venkatesh Seetharam)
-
-    FALCON-377 Unit test fails consistently - EntityManagerJerseyIT.testProcessInputUpdate.
-    (Shwetha GS via Venkatesh Seetharam)
-
-    FALCON-418 FALCON-395 introduces a regression with hadoop-1
-    (Jean-Baptiste Onofré via Venkatesh Seetharam)
-
-Release Version: 0.4-incubating
-
-   NEW FEATURES
-
-    FALCON-54: Falcon support for hadoop 2.0
-
-    FALCON-85: Hive (HCatalog) integration
-
-    FALCON-164: Provide Falcon Prism Web UI
-
-    FALCON-107: Adding extensions
-
-   IMPROVEMENTS
-
-    FALCON-73: Remove JUnit dependency as it's used only for asserts while TestNG is used for tests
-
-    FALCON-80: Option in falcon-start to not start embedded activemq
-
-    FALCON-83: Option to build distributed packages
-
-    FALCON-131: Remove set -e from falcon scripts
-
-    FALCON-151: Document the REST APIs for Falcon
-
-    FALCON-165: simplify packaging to create one package instead of client, falcon and prism packages
-
-    FALCON-192: Release falcon 0.4 umbrella
-
-    FALCON-62: Falcon compilation with hadoop 2.0 libs
-
-    FALCON-63: Ability to ingest hadoop libs to falcon release package
-
-    FALCON-67: Remove ant dependency in oozie workflows
-
-    FALCON-76: Use cluster readonly endpoint for replication source
-
-    FALCON-78: Falcon error when prism on one hadoop version and server on another
-
-    FALCON-86: Hive table integration with cluster entity
-
-    FALCON-87: Hive table integration with feed entity
-
-    FALCON-88: Add embedded hive and webhcat for integration tests
-
-    FALCON-90: Propagate hive table through Process mapper
-
-    FALCON-91: Handle feed updates in purview of tables
-
-    FALCON-93: Replication to handle hive table replication
-
-    FALCON-94: Retention to handle hive table eviction
-
-    FALCON-95: Enable embedding hive scripts directly in a process
-
-    FALCON-96: Hive client to talk to the metastore
-
-    FALCON-102: Add integration tests for feed entity parser with table defined
-
-    FALCON-103: Upgrade oozie to 4.0.x
-
-    FALCON-104: FALCON-86 introduces a backward incompatible change
-
-    FALCON-113: Update documentation for Hive integration
-
-    FALCON-129: Disable Late data handling for hive tables
-
-    FALCON-143: Enable Late data handling for hive tables
-
-    FALCON-146: Javadoc generation should respect skipCheck property
-
-    FALCON-153: Feed Storage type isn't passed in replication for successful post processing
-
-    FALCON-154: Remove webhcat startup in HadoopStartupListener as its not used
-
-    FALCON-161: Feed evictor evicts instances not eligible when pattern has dash
-
-    FALCON-163: Merge FALCON-85 branch into main line
-
-    FALCON-171: Provide status in /api/entities/list
-
-    FALCON-175: Visualize dependency information
-
-    FALCON-178: Implement client-side pagination
-
-    FALCON-190: /api/entities/list should allow the client to query different fields
-
-    FALCON-193: Update the documentation to reflect the current work of dashboard
-
-    FALCON-43: Add Falcon to Sonar for Analysis
-
-    FALCON-134: Remove jsch dependency
-
-    FALCON-135: Remove jzlib dependency
-
-    FALCON-136: Upgrade to commons-codec 1.8
-
-    FALCON-137: Remove commons-logging dependency
-
-    FALCON-138: Remove perf4j dependency
-
-    FALCON-198 Update LICENSE.txt to contain license information for all third-party libraries
-
-    FALCON-363 Jetty is started with war when the war is already expanded
-    (Venkatesh Seetharam)
-
-    FALCON-358 REST API does not conform to Rexster (Venkatesh Seetharam)
-
-    FALCON-359 Instance id's captured are of different formats in process and feed
-    (Venkatesh Seetharam)
-
-  BUG FIXES
-
-    FALCON-223: hive-exec bundles protobuf-2.4.1 which is incompatible with hadoop-2 requiring protobuf-2.5
-
-    FALCON-213: Validating Storage Exists (table) should be called only from the server (not from prism)
-
-    FALCON-24: Allow re-run of timedout instances
-
-    FALCON-42: Simplify Build and Installation Guide
-
-    FALCON-50: Falcon replication having issue to s3
-
-    FALCON-61: The identifier regex for oozie bundle, coordinator, workflow pattern is incorrect.
-
-    FALCON-69: exception from ConfigurationChangeListener should fail the API
-
-    FALCON-71: mvn jetty:run doesn't work
-
-    FALCON-72: Feeds with invalid oozie URI in cluster cannot be deleted
-
-    FALCON-74: Falcon CLI needs to be more robust in handling trailing spaces
-
-    FALCON-79: Fix typos in Falcon architecture document
-
-    FALCON-100: Build error with mvn 3.0.4
-
-    FALCON-101: mvn release:prepare fails because of build order
-
-    FALCON-127: Fix test issues after FALCON-107
-
-    FALCON-128: Feed replication post processing log mover error
-
-    FALCON-130: Use Geronimo Specs JMS instead of javax.jms
-
-    FALCON-132: assembly plugin for embedded does not generate client artifacts
-
-    FALCON-142: Cleanup testng dependency
-
-    FALCON-152: REST API for entity & Admin resources only returns XML
-
-    FALCON-159: Version API endpoint does not emit valid JSON
-
-    FALCON-162: Update falcon scripts to let the user set the log and pid locations.
-
-    FALCON-168: Different validity of clusters in feed not handled
-
-    FALCON-169: multiple "/" in target for replication for multi target feed
-
-    FALCON-174: bind activemq to all interfaces rather than just localhost
-
-    FALCON-179: Table replication must drop partition before import as late reruns fails
-
-    FALCON-180: Disable table replication for multiple sources
-
-    FALCON-182: Disable specifying partitions in inputs with table storage for process
-
-    FALCON-185: Timestamp handled as int
-
-    FALCON-186: Enable scheduler support for embedded activemq
-
-    FALCON-187: Update Falcon start script to also redirect stdout to the log file
-
-    FALCON-188: hadoop-2 profile is not carried through to oozie package
-
-    FALCON-194 Import external libraries into the repository
-
-    FALCON-204 Process mapper generates params assuming table input and output
-    as names literally (Venkatesh Seetharam)
-
-    FALCON-201 Package oozie-el-extensions jar files as part of tarball
-    (Ashish Singh via Venkatesh Seetharam)
-
-    FALCON-203 Do not generate transitive closure for the dependency graph
-    (Haohui Mai via Venkatesh Seetharam)
-
-    FALCON-212 OozieClient does not propagate user in request to
-    Oozie server (Venkatesh Seetharam)
-
-    FALCON-208 Improve the icons for instance lists. (Haohui Mai)
-
-    FALCON-158 Fix failing jenkins nightly build. (Venkatesh Seetharam)
-
-    FALCON-215 Falcon-start fails. (Arpit Gupta via Venkatesh Seetharam)
-
-
-
-Release Version: 0.3-incubating
-
-    FALCON-70 Confirm if v0.3 branch is ready for release as per guideline.
-    (Venkatesh Seetharam via Srikanth Sundarrajan)
-
-    FALCON-65 Avoid bundling compilation path dependent configs in artifact
-    and allow for config scan to seamlessly fallback to classpath if not found
-    in default conf location. (Srikanth Sundarrajan via Venkatesh Seetharam)
-
-    FALCON-58 Gaps in release source / binary tarball. (Srikanth Sundarrajan)
-
-    FALCON-57 Build fails if the source is not in scm. (Srikanth Sundarrajan)
-
-    FALCON-53 Add -incubating to the version. (Jarek Jarcec Cecho via Srikanth
-    Sundarrajan)
-
-    FALCON-56 Update Falcon packing to include all source files. (Srikanth
-    Sundarrajan)
-
-    FALCON-55 Update suspends old oozie coords. (Shwetha GS via Srikanth 
-    Sundarrajan)
-
-    FALCON-52 Main module configured for use with jetty:run has issues with app
-    start. (Srikanth Sundarrajan)
-
-    FALCON-30 Enable embedding pig scripts directly in a process. (Venkatesh
-    Seetharam via Srikanth Sundarrajan)
-
-    FALCON-47 Falcon Replication should support configurable delays in feed, 
-    parallel, timeout and bulk transfer with variable frequency (Shaik Idris
-    Ali via Srikanth Sundarrajan)
-
-    FALCON-49 Dependency on oozie-client pulls in hadoop-auth-0.23.1 which 
-    is not necessary (Venkatesh Seetharam via Srikanth Sundarrajan)
-
-    FALCON-32 Store test output in file rather then printing all the output
-    into the console (Srikanth Sundarrajan via Venkatesh Seetharam)
-
-    FALCON-1 Create packaging and scripts to install and try Apache Falcon
-    (Srikanth Sundarrajan via Venkatesh Seetharam)
-
-    FALCON-29 Add ability to tag/classify data sets and processes to enable
-    discovery (Venkatesh Seetharam via Srikanth Sundarrajan)
-
-    FALCON-31 File Installation-steps.txt contains old path to repository (Jarek 
-    Jarcec Cecho via Srikanth Sundarrajan)
-
-    FALCON-33 Add patch files to RAT ignore list (Jarek Jarcec Cecho via Srikanth
-    Sundarrajan)
-
-    FALCON-22 Rename ivory to falcon in project (Venkatesh Seetharam via Srikanth
-    Sundarrajan)
-
-    FALCON-19 Bump up the release version to 0.3-SNAPSHOT (Srikanth Sundarrajan 
-    via Venkatesh Seetharam)
-
-    FALCON-13 super pom must reflect license, project website, ML and SCM changes 
-    post incubation (Venkatesh Seetharam via Srikanth Sundarrajan) 
-
-    FALCON-10 Add findbugs plugin and fix findbugs warnings for project (Venkatesh
-    Seetharam via Srikanth Sundarrajan) 
-
-

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/acquisition/pom.xml
----------------------------------------------------------------------
diff --git a/acquisition/pom.xml b/acquisition/pom.xml
deleted file mode 100644
index 38ea259..0000000
--- a/acquisition/pom.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-    
-       http://www.apache.org/licenses/LICENSE-2.0
-        
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-acquisition</artifactId>
-    <description>Apache Falcon Acquisition Module</description>
-    <name>Apache Falcon Acquisition</name>
-    <packaging>jar</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/archival/pom.xml
----------------------------------------------------------------------
diff --git a/archival/pom.xml b/archival/pom.xml
deleted file mode 100644
index b117d9d..0000000
--- a/archival/pom.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-    
-       http://www.apache.org/licenses/LICENSE-2.0
-        
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <artifactId>falcon-archival</artifactId>
-    <description>Apache Falcon Archival Module</description>
-    <name>Apache Falcon Archival</name>
-    <packaging>jar</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/build-tools/pom.xml
----------------------------------------------------------------------
diff --git a/build-tools/pom.xml b/build-tools/pom.xml
deleted file mode 100644
index 6c8801e..0000000
--- a/build-tools/pom.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.falcon</groupId>
-        <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
-    </parent>
-    <groupId>org.apache.falcon</groupId>
-    <artifactId>build-tools</artifactId>
-    <name>Build Tools</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
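-            <!-- exec-maven-plugin runs build-oozie.sh during the validate phase so that a
-                 version-patched Oozie is installed in the local Maven repository before
-                 the rest of the build proceeds. -->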
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>exec-maven-plugin</artifactId>
-                <version>1.2.1</version>
-                <executions>
-                    <execution>
-                        <id>BUILD-OOZIE</id>
-                        <phase>validate</phase>
-                        <goals>
-                            <goal>exec</goal>
-                        </goals>
-                    </execution>
-                </executions>
-                <configuration>
-                    <executable>src/bin/build-oozie.sh</executable>
-                    <arguments>
-                        <argument>${oozie.version}</argument>
-                        <argument>${oozie.buildversion}</argument>
-                        <argument>${oozie.forcebuild}</argument>
-                    </arguments>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/8e49379d/build-tools/src/bin/build-oozie.sh
----------------------------------------------------------------------
diff --git a/build-tools/src/bin/build-oozie.sh b/build-tools/src/bin/build-oozie.sh
deleted file mode 100755
index 889d749..0000000
--- a/build-tools/src/bin/build-oozie.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-set -x
-
-VERSION=$1
-BUILD_VERSION=$2
-FORCE_BUILD=$3
-
-echo "oozie version $OOZIE_VERSION"
-
-if [ -z "${MAVEN_HOME}" ]
-then
-    export MVN_CMD=`which mvn`;
-    if [ -z "${MVN_CMD}" ]
-    then
-        echo "Maven is not installed.Please install and set MAVEN_HOME"
-        exit 1;
-    fi
-else
-    export MVN_CMD=${MAVEN_HOME}/bin/mvn;
-fi
-echo "Using maven from " $MVN_CMD
-
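-# Skip the build if an oozie-webapp war with the expected build version is already
-# present in the local Maven repository, unless a rebuild is forced via the third argument.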
-if [[ ($FORCE_BUILD == 'false') && ( -f `$MVN_CMD help:effective-settings | grep localRepository | cut -d\> -f2 | cut -d\< -f1`/org/apache/oozie/oozie-webapp/$BUILD_VERSION/oozie-webapp-$BUILD_VERSION.war) ]]
-then
-    echo "Oozie already setup. skipping";
-    exit 0;
-fi
-
-PKG_URL="http://archive.apache.org/dist/oozie/$VERSION/oozie-$VERSION.tar.gz"
-PKG=oozie-$VERSION
-
-mkdir -p ../target
-pushd ../target
-rm -rf oozie-*
-
-curl -v $PKG_URL -o oozie-$VERSION.tgz
-tar -xzvf oozie-$VERSION.tgz
-rm oozie-$VERSION.tgz
-cd $PKG
-
-sed -i.bak s/$VERSION\<\\/version\>/$BUILD_VERSION\<\\/version\>/g pom.xml */pom.xml */*/pom.xml
-patch -p0 < ../../build-tools/src/patches/oozie-site.patch
-
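-# Oozie 4.2.0 needs an extra patch to its hadoop-2 build profile; 4.1.0 builds as-is.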
-case $VERSION in
-4.1.0 )
-    ;;
-4.2.0 )
-    patch -p1 < ../../build-tools/src/patches/oozie-hadoop2-profile.patch
-    ;;
-esac
-
-rm `find . -name 'pom.xml.bak'`
-
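-# Build the patched Oozie (sources included, tests skipped) with the hadoop-2 profile
-# and install the artifacts into the local Maven repository.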
-$MVN_CMD clean source:jar install -DjavaVersion=1.7 -DtargetJavaVersion=1.6 -DskipTests -Phadoop-2
-
-popd