Posted to commits@falcon.apache.org by ra...@apache.org on 2015/07/29 19:09:41 UTC

[2/5] falcon git commit: FALCON-1319: Contribute HiveDr, Mirror tests and some test fixes contributed by Namit Maheshwari, Paul Isaychuk, Raghav Kumar Gautam & Ruslan Ostafiychuk

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java
new file mode 100644
index 0000000..552c15e
--- /dev/null
+++ b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSourceTargetOptionsTest.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.regression.searchUI;
+
+import org.apache.falcon.cli.FalconCLI;
+import org.apache.falcon.regression.Entities.ClusterMerlin;
+import org.apache.falcon.regression.core.bundle.Bundle;
+import org.apache.falcon.regression.core.helpers.ColoHelper;
+import org.apache.falcon.regression.core.util.BundleUtil;
+import org.apache.falcon.regression.testHelper.BaseUITestClass;
+import org.apache.falcon.regression.ui.search.LoginPage;
+import org.apache.falcon.regression.ui.search.MirrorWizardPage;
+import org.apache.falcon.regression.ui.search.MirrorWizardPage.Location;
+import org.apache.falcon.regression.ui.search.SearchPage;
+import org.apache.falcon.resource.EntityList;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.EnumSet;
+import java.util.Set;
+import java.util.TreeSet;
+
+/** UI tests for mirror creation. */
+@Test(groups = "search-ui")
+public class MirrorSourceTargetOptionsTest extends BaseUITestClass {
+    private final ColoHelper cluster = servers.get(0);
+    private SearchPage searchPage;
+    private MirrorWizardPage mirrorPage;
+    private MirrorWizardPage.ClusterBlock source;
+    private MirrorWizardPage.ClusterBlock target;
+
+    @BeforeClass(alwaysRun = true)
+    public void setup() throws Exception {
+        openBrowser();
+        searchPage = LoginPage.open(getDriver()).doDefaultLogin();
+        bundles[0] = BundleUtil.readELBundle();
+        bundles[0] = new Bundle(bundles[0], cluster);
+        bundles[0].generateUniqueBundle(this);
+        bundles[0].submitClusters(cluster);
+
+    }
+
+    @BeforeMethod(alwaysRun = true)
+    public void refreshMirrorPage() throws Exception {
+        searchPage.refresh();
+        mirrorPage = searchPage.getPageHeader().doCreateMirror();
+        source = mirrorPage.getSourceBlock();
+        target = mirrorPage.getTargetBlock();
+    }
+
+
+    @Test
+    public void testExclusiveWhereToRunJob() {
+        source.selectRunHere();
+        target.selectRunHere();
+        Assert.assertFalse(source.isRunHereSelected(), "'Run job here' shouldn't be selected on Source");
+        Assert.assertTrue(target.isRunHereSelected(), "'Run job here' should be selected on Target");
+
+        source.selectRunHere();
+        Assert.assertTrue(source.isRunHereSelected(), "'Run job here' should be selected on Source");
+        Assert.assertFalse(target.isRunHereSelected(), "'Run job here' shouldn't be selected on Target");
+
+        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY);
+
+        target.selectRunHere();
+        Assert.assertFalse(source.isRunHereSelected(), "'Run job here' shouldn't be selected on Source");
+        Assert.assertTrue(target.isRunHereSelected(), "'Run job here' should be selected on Target");
+
+        source.selectRunHere();
+        Assert.assertTrue(source.isRunHereSelected(), "'Run job here' should be selected on Source");
+        Assert.assertFalse(target.isRunHereSelected(), "'Run job here' shouldn't be selected on Target");
+
+        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HDFS_REPLICATION);
+        source.setLocationType(Location.AZURE);
+        Assert.assertFalse(source.isRunHereAvailable(),
+                "'Run job here' shouldn't be available on source if Source=Azure");
+
+        source.setLocationType(Location.S3);
+        Assert.assertFalse(source.isRunHereAvailable(),
+                "'Run job here' shouldn't be available on source if Source=S3");
+
+        source.setLocationType(Location.HDFS);
+        target.setLocationType(Location.AZURE);
+        Assert.assertFalse(target.isRunHereAvailable(),
+                "'Run job here' shouldn't be available on target if Target=Azure");
+
+        target.setLocationType(Location.S3);
+        Assert.assertFalse(target.isRunHereAvailable(),
+                "'Run job here' shouldn't be available on target if Target=S3");
+
+    }
+
+    @Test
+    public void testExclusiveFSOptions() {
+        source.setLocationType(Location.HDFS);
+        Assert.assertEquals(target.getAvailableLocationTypes(),
+                EnumSet.allOf(Location.class), "All target types should be available if source=HDFS");
+
+
+        source.setLocationType(Location.AZURE);
+        Assert.assertEquals(target.getAvailableLocationTypes(),
+                EnumSet.of(Location.HDFS), "Only HDFS should be available as target if source=Azure");
+
+        source.setLocationType(Location.S3);
+        Assert.assertEquals(target.getAvailableLocationTypes(),
+                EnumSet.of(Location.HDFS), "Only HDFS should be available as target if source=S3");
+
+        source.setLocationType(Location.HDFS);
+        target.setLocationType(Location.HDFS);
+        Assert.assertEquals(source.getAvailableLocationTypes(),
+                EnumSet.allOf(Location.class), "All source types should be available if target=HDFS");
+
+
+        target.setLocationType(Location.AZURE);
+        Assert.assertEquals(source.getAvailableLocationTypes(),
+                EnumSet.of(Location.HDFS), "Only HDFS should be available as source if target=Azure");
+
+        target.setLocationType(Location.S3);
+        Assert.assertEquals(source.getAvailableLocationTypes(),
+                EnumSet.of(Location.HDFS), "Only HDFS should be available as source if target=S3");
+    }
+
+    @Test
+    public void testClustersDropDownList() throws Exception {
+        //add more clusters
+        ClusterMerlin clusterMerlin = bundles[0].getClusterElement();
+        String clusterName = clusterMerlin.getName() + '-';
+        for (int i = 0; i < 5; i++) {
+            clusterMerlin.setName(clusterName + i);
+            prism.getClusterHelper().submitEntity(clusterMerlin.toString());
+        }
+        EntityList result =
+            prism.getClusterHelper().listAllEntities().getEntityList();
+        Assert.assertNotNull(result.getElements(),
+            "There should be more than 5 clusters in result");
+        Set<String> apiClusterNames = new TreeSet<>();
+        for (EntityList.EntityElement element : result.getElements()) {
+            apiClusterNames.add(element.name);
+        }
+
+        //refresh page to get new clusters on UI
+        refreshMirrorPage();
+
+        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HDFS_REPLICATION);
+        source.setLocationType(Location.HDFS);
+        target.setLocationType(Location.HDFS);
+
+        Assert.assertEquals(source.getAvailableClusters(), apiClusterNames,
+            "Clusters available via API are not the same as on Source for HDFS replication");
+        Assert.assertEquals(target.getAvailableClusters(), apiClusterNames,
+            "Clusters available via API are not the same as on Target for HDFS replication");
+
+        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY);
+
+        Assert.assertEquals(source.getAvailableClusters(), apiClusterNames,
+            "Clusters available via API are not the same as on Source for HIVE replication");
+        Assert.assertEquals(target.getAvailableClusters(), apiClusterNames,
+            "Clusters available via API are not the same as on Target for HIVE replication");
+    }
+
+    @Test
+    public void testInvalidValidity() {
+        mirrorPage.setName(bundles[0].getProcessName());
+        mirrorPage.setMirrorType(FalconCLI.RecipeOperation.HDFS_REPLICATION);
+        String baseTestDir = cleanAndGetTestDir();
+        source.setPath(baseTestDir);
+        source.selectCluster(bundles[0].getClusterNames().get(0));
+        target.setPath(baseTestDir);
+        target.selectCluster(bundles[0].getClusterNames().get(0));
+
+        mirrorPage.setStartTime("2010-01-01T02:00Z");
+        mirrorPage.setEndTime("2010-01-01T01:00Z");
+        mirrorPage.next();
+        mirrorPage.save();
+        Assert.assertTrue(mirrorPage.getActiveAlertText().contains("should be before process end"),
+            "Warning about wrong Validity should be present");
+    }
+
+    @AfterClass(alwaysRun = true)
+    public void tearDownClass() {
+        removeTestClassEntities();
+        closeBrowser();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java
new file mode 100644
index 0000000..989e4b3
--- /dev/null
+++ b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorSummaryTest.java
@@ -0,0 +1,207 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.regression.searchUI;
+
+import org.apache.falcon.entity.v0.Frequency;
+import org.apache.falcon.entity.v0.process.PolicyType;
+import org.apache.falcon.entity.v0.process.Retry;
+import org.apache.falcon.regression.core.bundle.Bundle;
+import org.apache.falcon.regression.core.helpers.ColoHelper;
+import org.apache.falcon.regression.core.util.BundleUtil;
+import org.apache.falcon.regression.testHelper.BaseUITestClass;
+import org.apache.falcon.regression.ui.search.LoginPage;
+import org.apache.falcon.regression.ui.search.MirrorWizardPage;
+import org.apache.falcon.regression.ui.search.MirrorWizardPage.Summary;
+import org.apache.falcon.regression.ui.search.SearchPage;
+import org.apache.log4j.Logger;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.EnumMap;
+import java.util.Map;
+
+
+/** UI tests for mirror creation. */
+@Test(groups = "search-ui")
+public class MirrorSummaryTest extends BaseUITestClass {
+    private static final Logger LOGGER = Logger.getLogger(MirrorSummaryTest.class);
+
+    private final ColoHelper cluster = servers.get(0);
+    private SearchPage searchPage;
+    private MirrorWizardPage mirrorPage;
+    private String baseTestDir = cleanAndGetTestDir();
+    private String start = "2010-01-01T01:00Z";
+    private String end = "2010-01-01T02:00Z";
+    private Map<Summary, String> baseMap;
+
+    @BeforeClass(alwaysRun = true)
+    public void setupClass() throws Exception {
+        baseMap = new EnumMap<>(Summary.class);
+        baseMap.put(Summary.MAX_MAPS, "5");
+        baseMap.put(Summary.MAX_BANDWIDTH, "100");
+        baseMap.put(Summary.ACL_OWNER, LoginPage.UI_DEFAULT_USER);
+        baseMap.put(Summary.ACL_GROUP, "users");
+        baseMap.put(Summary.ACL_PERMISSIONS, "0x755");
+        baseMap.put(Summary.RETRY_POLICY, "periodic");
+        baseMap.put(Summary.RETRY_DELAY, "30 minutes");
+        baseMap.put(Summary.RETRY_ATTEMPTS, "3");
+        baseMap.put(Summary.FREQUENCY, "5 minutes");
+        baseMap.put(Summary.SOURCE_PATH, baseTestDir);
+        baseMap.put(Summary.TARGET_PATH, baseTestDir);
+        baseMap.put(Summary.START, start);
+        baseMap.put(Summary.END, end);
+
+        //HDFS is default mirror type
+        baseMap.put(Summary.TYPE, "HDFS");
+        baseMap.put(Summary.TAGS, "_falcon_mirroring_type - HDFS");
+        baseMap.put(Summary.SOURCE_LOCATION, "HDFS");
+        baseMap.put(Summary.TARGET_LOCATION, "HDFS");
+
+        openBrowser();
+        searchPage = LoginPage.open(getDriver()).doDefaultLogin();
+    }
+
+    @BeforeMethod(alwaysRun = true)
+    public void setup() throws Exception {
+        removeTestClassEntities();
+        bundles[0] = BundleUtil.readELBundle();
+        bundles[0] = new Bundle(bundles[0], cluster);
+        bundles[0].generateUniqueBundle(this);
+        bundles[0].submitClusters(cluster);
+        searchPage.refresh();
+        mirrorPage = searchPage.getPageHeader().doCreateMirror();
+        MirrorWizardPage.ClusterBlock source = mirrorPage.getSourceBlock();
+        MirrorWizardPage.ClusterBlock target = mirrorPage.getTargetBlock();
+        String clusterName = bundles[0].getClusterNames().get(0);
+        String mirrorName = bundles[0].getProcessName();
+
+        baseMap.put(Summary.RUN_ON, clusterName);
+        baseMap.put(Summary.NAME, mirrorName);
+        baseMap.put(Summary.SOURCE_CLUSTER, clusterName);
+        baseMap.put(Summary.TARGET_CLUSTER, clusterName);
+
+        mirrorPage.setName(mirrorName);
+
+        source.setPath(baseTestDir);
+        source.selectCluster(clusterName);
+        target.setPath(baseTestDir);
+        target.selectCluster(clusterName);
+
+        mirrorPage.setStartTime(start);
+        mirrorPage.setEndTime(end);
+
+    }
+
+    @Test
+    public void testSummaryDefaultScenario() {
+        mirrorPage.next();
+
+        Map<Summary, String> actualParams = mirrorPage.getSummaryProperties();
+
+
+        LOGGER.info("Actual parameters: " + actualParams);
+        LOGGER.info("Expected parameters: " + baseMap);
+
+        Assert.assertEquals(actualParams, baseMap);
+
+        mirrorPage.save();
+        Assert.assertTrue(mirrorPage.getActiveAlertText().contains("Submit successful"),
+            "Submit should be successful");
+    }
+
+    @Test
+    public void testModificationOnPreviousStep() {
+        mirrorPage.next();
+
+        Map<Summary, String> actualParams = mirrorPage.getSummaryProperties();
+
+        LOGGER.info("Actual parameters: " + actualParams);
+        LOGGER.info("Expected parameters: " + baseMap);
+
+        Assert.assertEquals(actualParams, baseMap);
+
+        mirrorPage.previous();
+
+        String newPath = baseTestDir + "/new";
+        mirrorPage.getTargetBlock().setPath(newPath);
+
+        Map<Summary, String> expectedParams = new EnumMap<>(baseMap);
+        expectedParams.put(Summary.TARGET_PATH, newPath);
+
+        LOGGER.info("Target path set to " + newPath);
+
+        mirrorPage.next();
+
+        Assert.assertEquals(mirrorPage.getSummaryProperties(), expectedParams);
+
+
+    }
+
+
+    @Test
+    public void testAdvancedScenario() {
+
+        mirrorPage.toggleAdvancedOptions();
+        mirrorPage.setHdfsDistCpMaxMaps("9");
+        mirrorPage.setHdfsMaxBandwidth("50");
+        mirrorPage.setAclOwner("somebody");
+        mirrorPage.setAclGroup("somegroup");
+        mirrorPage.setAclPermission("0x000");
+        mirrorPage.setFrequency(new Frequency("8", Frequency.TimeUnit.hours));
+        Retry retry = new Retry();
+        retry.setAttempts(8);
+        retry.setPolicy(PolicyType.FINAL);
+        retry.setDelay(new Frequency("13", Frequency.TimeUnit.days));
+        mirrorPage.setRetry(retry);
+
+
+        mirrorPage.next();
+
+        Map<Summary, String> actualParams = mirrorPage.getSummaryProperties();
+        Map<Summary, String> expectedParams = new EnumMap<>(baseMap);
+        expectedParams.put(Summary.ACL_OWNER, "somebody");
+        expectedParams.put(Summary.ACL_GROUP, "somegroup");
+        expectedParams.put(Summary.ACL_PERMISSIONS, "0x000");
+        expectedParams.put(Summary.MAX_MAPS, "9");
+        expectedParams.put(Summary.MAX_BANDWIDTH, "50");
+        expectedParams.put(Summary.FREQUENCY, "8 hours");
+        expectedParams.put(Summary.RETRY_ATTEMPTS, "8");
+        expectedParams.put(Summary.RETRY_POLICY, "final");
+        expectedParams.put(Summary.RETRY_DELAY, "13 days");
+
+
+        LOGGER.info("Actual parameters: " + actualParams);
+        LOGGER.info("Expected parameters: " + expectedParams);
+
+        Assert.assertEquals(actualParams, expectedParams);
+
+
+    }
+
+
+    @AfterClass(alwaysRun = true)
+    public void tearDownClass() {
+        removeTestClassEntities();
+        closeBrowser();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java
new file mode 100644
index 0000000..c54789b
--- /dev/null
+++ b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/searchUI/MirrorTest.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.regression.searchUI;
+
+import org.apache.falcon.cli.FalconCLI;
+import org.apache.falcon.entity.v0.Frequency;
+import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
+import org.apache.falcon.regression.Entities.ClusterMerlin;
+import org.apache.falcon.regression.Entities.ProcessMerlin;
+import org.apache.falcon.regression.Entities.RecipeMerlin;
+import org.apache.falcon.regression.core.bundle.Bundle;
+import org.apache.falcon.regression.core.enumsAndConstants.MerlinConstants;
+import org.apache.falcon.regression.core.helpers.ColoHelper;
+import org.apache.falcon.regression.core.supportClasses.NotifyingAssert;
+import org.apache.falcon.regression.core.util.AssertUtil;
+import org.apache.falcon.regression.core.util.BundleUtil;
+import org.apache.falcon.regression.core.util.HadoopUtil;
+import org.apache.falcon.regression.core.util.TimeUtil;
+import org.apache.falcon.regression.testHelper.BaseUITestClass;
+import org.apache.falcon.regression.ui.search.LoginPage;
+import org.apache.falcon.regression.ui.search.MirrorWizardPage;
+import org.apache.falcon.regression.ui.search.SearchPage;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hive.hcatalog.api.HCatClient;
+import org.apache.log4j.Logger;
+import org.apache.oozie.client.OozieClient;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.util.Arrays;
+
+/** UI tests for Mirror Setup Wizard. */
+@Test(groups = "search-ui")
+public class MirrorTest extends BaseUITestClass {
+    private static final Logger LOGGER = Logger.getLogger(MirrorTest.class);
+    private final String baseTestDir = cleanAndGetTestDir();
+    private final String hdfsSrcDir = baseTestDir + "/hdfsSrcDir";
+    private final String hdfsTgtDir = baseTestDir + "/hdfsTgtDir";
+    private final String hdfsStrictDir = baseTestDir + "/strictDir";
+    private static final String DB_NAME = "MirrorTest";
+    private static final String DB2_NAME = "MirrorTest2";
+    private static final String TBL1_NAME = "MirrorTable1";
+    private static final String TBL2_NAME = "MirrorTable2";
+    private final ColoHelper cluster = servers.get(0);
+    private final ColoHelper cluster2 = servers.get(1);
+    private final FileSystem clusterFS = serverFS.get(0);
+    private final FileSystem clusterFS2 = serverFS.get(1);
+    private final OozieClient clusterOC = serverOC.get(0);
+    private final OozieClient clusterOC2 = serverOC.get(1);
+    private HCatClient clusterHC;
+    private HCatClient clusterHC2;
+    private RecipeMerlin recipeMerlin;
+    private Connection connection;
+    private Connection connection2;
+    private MirrorWizardPage mirrorPage;
+    /**
+     * Submit source and target clusters, prepare a HiveDR recipe between them,
+     * create staging directories with strict permissions on both clusters and
+     * open the mirror creation wizard in the UI.
+     * @throws Exception if cluster submission, HDFS setup or UI login fails
+     */
+    @BeforeMethod(alwaysRun = true)
+    public void setup() throws Exception {
+        bundles[0] = new Bundle(BundleUtil.readHCatBundle(), cluster);
+        bundles[1] = new Bundle(BundleUtil.readHCatBundle(), cluster2);
+        bundles[0].generateUniqueBundle(this);
+        bundles[1].generateUniqueBundle(this);
+        final ClusterMerlin srcCluster = bundles[0].getClusterElement();
+        final ClusterMerlin tgtCluster = bundles[1].getClusterElement();
+        Bundle.submitCluster(bundles[0], bundles[1]);
+
+        recipeMerlin = RecipeMerlin.readFromDir("HiveDrRecipe",
+            FalconCLI.RecipeOperation.HIVE_DISASTER_RECOVERY)
+            .withRecipeCluster(srcCluster);
+        recipeMerlin.withSourceCluster(srcCluster)
+            .withTargetCluster(tgtCluster)
+            .withFrequency(new Frequency("5", Frequency.TimeUnit.minutes))
+            .withValidity(TimeUtil.getTimeWrtSystemTime(-5), TimeUtil.getTimeWrtSystemTime(5));
+        recipeMerlin.setUniqueName(this.getClass().getSimpleName());
+        recipeMerlin.withSourceDb(DB_NAME);
+        HadoopUtil.recreateDir(clusterFS, hdfsStrictDir);
+        HadoopUtil.recreateDir(clusterFS2, hdfsStrictDir);
+        clusterFS.setPermission(new Path(hdfsStrictDir), FsPermission.valueOf("drwx------"));
+        clusterFS2.setPermission(new Path(hdfsStrictDir), FsPermission.valueOf("drwx------"));
+        openBrowser();
+        SearchPage searchPage = LoginPage.open(getDriver()).doDefaultLogin();
+        mirrorPage = searchPage.getPageHeader().doCreateMirror();
+        mirrorPage.checkPage();
+    }
+
+
+    @AfterMethod(alwaysRun = true)
+    public void tearDown() throws IOException {
+        removeTestClassEntities();
+        closeBrowser();
+    }
+
+    @Test
+    public void testHeader() throws Exception {
+        mirrorPage.getPageHeader().checkHeader();
+    }
+
+    /**
+     * Select Dataset type as FileSystem. Select both source and target as HDFS.
+     * Populate all fields (name, source, target, validity etc.) with correct and existing values.
+     * Click next. Create mirror.
+     * Using get entity definition API check that entity has been created.
+     * @throws Exception
+     */
+    @Test(enabled = false)
+    public void testHdfsDefaultScenario() throws Exception {
+        final ClusterMerlin srcCluster = bundles[0].getClusterElement();
+        final ClusterMerlin tgtCluster = bundles[1].getClusterElement();
+        RecipeMerlin hdfsRecipe = RecipeMerlin.readFromDir("HdfsRecipe",
+            FalconCLI.RecipeOperation.HDFS_REPLICATION)
+            .withRecipeCluster(srcCluster);
+        hdfsRecipe.withSourceCluster(srcCluster)
+            .withTargetCluster(tgtCluster)
+            .withFrequency(new Frequency("5", Frequency.TimeUnit.minutes))
+            .withValidity(TimeUtil.getTimeWrtSystemTime(-5), TimeUtil.getTimeWrtSystemTime(5));
+        hdfsRecipe.setUniqueName(this.getClass().getSimpleName());
+        hdfsRecipe.withSourceDir(hdfsSrcDir).withTargetDir(hdfsTgtDir);
+        hdfsRecipe.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
+
+        mirrorPage.applyRecipe(hdfsRecipe);
+        mirrorPage.next();
+        mirrorPage.save();
+
+        AssertUtil.assertSucceeded(prism.getProcessHelper().getStatus(
+            createFakeProcessForRecipe(bundles[0].getProcessObject(), recipeMerlin)));
+    }
+
+    /**
+     * Create DB on source with 1 table.
+     * Select Dataset type as Hive.
+     * Populate all fields (name, source, target, validity etc.) with correct and existing values.
+     * Click next. Create mirror.
+     * Using get entity definition API check that entity has been created.
+     * @throws Exception
+     */
+    @Test(dataProvider = "getDbsAndTbls")
+    public void testHiveDefaultScenario(String dbName, String tblName) throws Exception {
+        recipeMerlin.withSourceDb(dbName);
+        recipeMerlin.withSourceTable(tblName);
+        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
+        mirrorPage.applyRecipe(recipeMerlin);
+        mirrorPage.next();
+        mirrorPage.save();
+        AssertUtil.assertSucceeded(prism.getProcessHelper().getStatus(
+            createFakeProcessForRecipe(bundles[0].getProcessObject(), recipeMerlin)));
+    }
+
+    @DataProvider
+    public Object[][] getDbsAndTbls() {
+        return new String[][]{
+            {DB_NAME, ""},
+            {DB_NAME + ',' + DB2_NAME, ""},
+            {DB_NAME, TBL1_NAME + ',' + TBL2_NAME},
+        };
+    }
+
+    /**
+     * Test recipe with bad acls.
+     * Set owner/group as invalid string (utf-8, special chars, number).
+     * Check that user is not allowed to go to the next step and has been notified with an alert.
+     * Set permissions as 4digit number, negative, string, 000. Check the same.
+     */
+    @Test(enabled = false)
+    public void testInvalidAcl() {
+        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
+        final String goodAclOwner = MerlinConstants.CURRENT_USER_NAME;
+        final String goodAclGroup = MerlinConstants.CURRENT_USER_GROUP;
+        final String goodAclPerms = "777";
+        mirrorPage.applyRecipe(recipeMerlin);
+        NotifyingAssert notifyingAssert = new NotifyingAssert(true);
+        for(String badAclOwner: new String[] {"utf8\u20ACchar", "speci@l", "123"}) {
+            mirrorPage.setAclOwner(badAclOwner);
+            notifyingAssert.assertTrue(mirrorPage.isAclOwnerWarningDisplayed(),
+                "Expecting invalid owner warning to be displayed for bad acl owner: " + badAclOwner);
+            mirrorPage.next(); //should not go through
+            if (mirrorPage.getStepNumber() == 2) {
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setAclOwner(goodAclOwner);
+            notifyingAssert.assertFalse(mirrorPage.isAclOwnerWarningDisplayed(),
+                "Expecting invalid owner warning to not be displayed for good acl owner: " + goodAclOwner);
+        }
+
+        for(String badAclGroup: new String[] {"utf8\u20ACchar", "speci@l", "123"}) {
+            mirrorPage.setAclGroup(badAclGroup);
+            notifyingAssert.assertTrue(mirrorPage.isAclGroupWarningDisplayed(),
+                "Expecting invalid group warning to be displayed for bad acl group: " + badAclGroup);
+            mirrorPage.next(); //should not go through
+            if (mirrorPage.getStepNumber() == 2) {
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setAclGroup(goodAclGroup);
+            notifyingAssert.assertFalse(mirrorPage.isAclGroupWarningDisplayed(),
+                "Expecting invalid group warning to not be displayed for good acl group: " + goodAclGroup);
+        }
+
+        for(String badAclPermission: new String[] {"1234", "-123", "str", "000", "1*", "*1"}) {
+            mirrorPage.setAclPermission(badAclPermission);
+            notifyingAssert.assertTrue(mirrorPage.isAclPermissionWarningDisplayed(),
+                "Expecting invalid permission warning to be displayed for bad acl permission: " + badAclPermission);
+            mirrorPage.next(); //should not go through
+            if (mirrorPage.getStepNumber() == 2) {
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setAclPermission(goodAclPerms); //clear error
+            notifyingAssert.assertFalse(mirrorPage.isAclPermissionWarningDisplayed(),
+                "Expecting invalid permission warning to not be displayed for good acl permission: " + goodAclPerms);
+        }
+        notifyingAssert.assertAll();
+    }
+
+    /**
+     * Select Hive as dataset type.
+     * Set source/target staging paths to a path with an invalid pattern, a digit, an empty value,
+     * or special/utf-8 symbols. Check that the user is not allowed to go to the next step
+     * and has been notified with an alert.
+     */
+    @Test(enabled = false)
+    public void testHiveAdvancedInvalidStaging() {
+        recipeMerlin.withSourceDb(DB_NAME);
+        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
+        mirrorPage.applyRecipe(recipeMerlin);
+        NotifyingAssert notifyingAssert = new NotifyingAssert(true);
+        final String goodSrcStaging = recipeMerlin.getSrcCluster().getLocation(ClusterLocationType.STAGING).getPath();
+        final String goodTgtStaging = recipeMerlin.getTgtCluster().getLocation(ClusterLocationType.STAGING).getPath();
+        final String[] badTestPaths = new String[] {"not_a_path", "", "not/allowed"};
+        for (String path : badTestPaths) {
+            mirrorPage.setSourceStaging(path);
+            //check error
+            mirrorPage.next();
+            if (mirrorPage.getStepNumber() == 2) {
+                notifyingAssert.fail(
+                    "Navigation to page 2 should not be allowed as source staging path is bad: " + path);
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setSourceStaging(goodSrcStaging);
+            //check error disappeared
+        }
+        for (String path : badTestPaths) {
+            mirrorPage.setTargetStaging(path);
+            //check error
+            mirrorPage.next();
+            if (mirrorPage.getStepNumber() == 2) {
+                notifyingAssert.fail(
+                    "Navigation to page 2 should not be allowed as target staging path is bad: " + path);
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setTargetStaging(goodTgtStaging);
+            //check error disappeared
+        }
+        notifyingAssert.assertAll();
+    }
+
+    /**
+     * Select Hive as dataset type.
+     * Set source/target staging paths as path pointing to directories with strict permissions
+     * (another owner, 700 permissions).
+     * Check that user is not allowed to go to the next step and has been notified with an alert.
+     */
+    @Test(enabled = false)
+    public void testHiveAdvancedStagingAcl() throws Exception {
+        recipeMerlin.withSourceDb(DB_NAME);
+        recipeMerlin.setTags(Arrays.asList("key1=val1", "key2=val2", "key3=val3"));
+        mirrorPage.applyRecipe(recipeMerlin);
+        NotifyingAssert notifyingAssert = new NotifyingAssert(true);
+        final String goodSrcStaging = recipeMerlin.getSrcCluster().getLocation(ClusterLocationType.STAGING).getPath();
+        final String goodTgtStaging = recipeMerlin.getTgtCluster().getLocation(ClusterLocationType.STAGING).getPath();
+        final String[] badTestPaths = new String[] {"/apps", hdfsStrictDir};
+        for (String path : badTestPaths) {
+            mirrorPage.setSourceStaging(path);
+            //check error
+            mirrorPage.next();
+            if (mirrorPage.getStepNumber() == 2) {
+                notifyingAssert.fail(
+                    "Navigation to page 2 should not be allowed as source staging path is bad: " + path
+                        + " (" + clusterFS.getFileStatus(new Path(path)) + ")");
+
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setSourceStaging(goodSrcStaging);
+            //check error disappeared
+        }
+        for (String path : badTestPaths) {
+            mirrorPage.setTargetStaging(path);
+            //check error
+            mirrorPage.next();
+            if (mirrorPage.getStepNumber() == 2) {
+                notifyingAssert.fail(
+                    "Navigation to page 2 should not be allowed as target staging path is bad: " + path
+                        + " (" + clusterFS.getFileStatus(new Path(path)) + ")");
+                mirrorPage.silentPrevious();
+                mirrorPage.toggleAdvancedOptions();
+            }
+            mirrorPage.setTargetStaging(goodTgtStaging);
+            //check error disappeared
+        }
+        notifyingAssert.assertAll();
+    }
+
+    /**
+     * Hack to work with the process corresponding to a recipe.
+     * @param processMerlin process merlin to be modified
+     *                      (ideally we want to get rid of this and use the recipe to generate a fake process xml)
+     * @param recipe recipe object that needs to be faked
+     * @return process xml string carrying the recipe's name
+     */
+    private String createFakeProcessForRecipe(ProcessMerlin processMerlin, RecipeMerlin recipe) {
+        processMerlin.setName(recipe.getName());
+        return processMerlin.toString();
+    }
+
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/FalconClientTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/FalconClientTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/FalconClientTest.java
index fec9d83..d11411b 100644
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/FalconClientTest.java
+++ b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/FalconClientTest.java
@@ -86,7 +86,7 @@ public class FalconClientTest extends BaseTestClass {
         final String clusterXml = bundles[0].getClusters().get(0);
         final ExecResult execResult =
             prism.getClusterHelper().clientDelete(clusterXml, MerlinConstants.DIFFERENT_USER_NAME);
-        AssertUtil.assertFailed(execResult, "cluster deletion failed");
+        AssertUtil.assertFailed(execResult, "ERROR: Forbidden;");
     }
 
     @AfterMethod(alwaysRun = true)

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/ProcessAclTest.java
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/ProcessAclTest.java b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/ProcessAclTest.java
index eb05859..48cfe54 100644
--- a/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/ProcessAclTest.java
+++ b/falcon-regression/merlin/src/test/java/org/apache/falcon/regression/security/ProcessAclTest.java
@@ -26,10 +26,12 @@ import org.apache.falcon.regression.core.helpers.entity.AbstractEntityHelper;
 import org.apache.falcon.regression.core.util.AssertUtil;
 import org.apache.falcon.regression.core.util.BundleUtil;
 import org.apache.falcon.regression.core.util.HadoopUtil;
+import org.apache.falcon.regression.core.util.InstanceUtil;
 import org.apache.falcon.regression.core.util.MatrixUtil;
 import org.apache.falcon.regression.core.util.OSUtil;
 import org.apache.falcon.regression.testHelper.BaseTestClass;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.oozie.client.OozieClient;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 import org.testng.Assert;
@@ -49,6 +51,7 @@ public class ProcessAclTest extends BaseTestClass {
 
     private ColoHelper cluster = servers.get(0);
     private FileSystem clusterFS = serverFS.get(0);
+    private OozieClient clusterOC = serverOC.get(0);
     private String baseTestDir = cleanAndGetTestDir();
     private String aggregateWorkflowDir = baseTestDir + "/aggregator";
     private String feedInputPath = baseTestDir + "/input" + MINUTE_DATE_PATTERN;
@@ -167,6 +170,7 @@ public class ProcessAclTest extends BaseTestClass {
     public void othersEditScheduledProcess(final String user, final EntityOp op, boolean isAllowed)
         throws Exception {
         bundles[0].submitFeedsScheduleProcess();
+        InstanceUtil.waitTillInstancesAreCreated(clusterOC, bundles[0].getProcessData(), 0);
         if (op == EntityOp.resume) {
             processHelper.suspend(processString);
         } else if (op == EntityOp.update) {

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-template.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-template.xml b/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-template.xml
new file mode 100644
index 0000000..c644b99
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-template.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<process name="##falcon.recipe.job.name##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##falcon.recipe.cluster.name##">
+            <validity end="##falcon.recipe.cluster.validity.end##" start="##falcon.recipe.cluster.validity.start##"/>
+        </cluster>
+    </clusters>
+
+    <tags>_falcon_mirroring_type=HDFS</tags>
+
+    <parallel>1</parallel>
+    <!-- Dir replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##falcon.recipe.frequency##</frequency>
+    <timezone>UTC</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##falcon.recipe.workflow.name##" engine="oozie" path="/apps/data-mirroring/workflows/hdfs-replication-workflow.xml" lib="##workflow.lib.path##"/>
+    <retry policy="##falcon.recipe.retry.policy##" delay="##falcon.recipe.retry.delay##" attempts="3"/>
+    <ACL/>
+</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-workflow.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-workflow.xml b/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-workflow.xml
new file mode 100644
index 0000000..aa4d5b0
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery-workflow.xml
@@ -0,0 +1,120 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-fs-workflow'>
+    <start to='dr-replication'/>
+    <!-- Replication action -->
+    <action name="dr-replication">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.oozie.libpath</name>
+                    <value>${wf:conf("falcon.libpath")}</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.replication.FeedReplicator</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-maxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-mapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-sourcePaths</arg>
+            <arg>${drSourceClusterFS}${drSourceDir}</arg>
+            <arg>-targetPath</arg>
+            <arg>${drTargetClusterFS}${drTargetDir}</arg>
+            <arg>-falconFeedStorageType</arg>
+            <arg>FILESYSTEM</arg>
+            <arg>-availabilityFlag</arg>
+            <arg>${availabilityFlag == 'NA' ? "NA" : availabilityFlag}</arg>
+            <arg>-counterLogDir</arg>
+            <arg>${logDir}/job-${nominalTime}</arg>
+        </java>
+        <ok to="success"/>
+        <error to="failure"/>
+    </action>
+    <decision name="success">
+        <switch>
+            <case to="successAlert">
+                ${drNotificationReceivers ne 'NA'}
+            </case>
+            <default to="end"/>
+        </switch>
+    </decision>
+    <decision name="failure">
+        <switch>
+            <case to="failureAlert">
+                ${drNotificationReceivers ne 'NA'}
+            </case>
+            <default to="fail"/>
+        </switch>
+    </decision>
+    <action name="successAlert">
+        <email xmlns="uri:oozie:email-action:0.2">
+            <to>${drNotificationReceivers}</to>
+            <subject>INFO: HDFS DR workflow ${entityName} completed successfully</subject>
+            <body>
+                The HDFS DR workflow ${wf:id()} is successful.
+                Source      =   ${drSourceClusterFS}${drSourceDir}
+                Target      =   ${drTargetClusterFS}${drTargetDir}
+            </body>
+        </email>
+        <ok to="end"/>
+        <error to="end"/>
+    </action>
+    <action name="failureAlert">
+        <email xmlns="uri:oozie:email-action:0.2">
+            <to>${drNotificationReceivers}</to>
+            <subject>ERROR: HDFS DR workflow ${entityName} failed</subject>
+            <body>
+                The workflow ${wf:id()} had issues and was killed.  The error message is: ${wf:errorMessage(wf:lastErrorNode())}
+                Source      =   ${drSourceClusterFS}${drSourceDir}
+                Target      =   ${drTargetClusterFS}${drTargetDir}
+            </body>
+        </email>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>
+            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
+        </message>
+    </kill>
+    <end name="end"/>
+</workflow-app>

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery.properties
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery.properties b/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery.properties
new file mode 100644
index 0000000..99f748d
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HdfsRecipe/hive-disaster-recovery.properties
@@ -0,0 +1,75 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##### NOTE: This is a TEMPLATE file which can be copied and edited
+
+##### Recipe properties
+##### Unique recipe job name
+falcon.recipe.name=hdfs-replication-monthly
+
+##### Workflow properties
+falcon.recipe.workflow.name=hdfs-dr-workflow
+# Provide the Wf absolute path. This can be an HDFS or local FS path. If the WF is on the local FS, it will be copied to HDFS
+falcon.recipe.workflow.path=/apps/data-mirroring/workflows/hdfs-replication-workflow.xml
+# Provide the Wf lib absolute path. This can be an HDFS or local FS path. If the libs are on the local FS, they will be copied to HDFS
+#falcon.recipe.workflow.lib.path=/recipes/hdfs-replication/lib
+
+##### Cluster properties
+# Cluster where job should run
+falcon.recipe.cluster.name=primaryCluster
+# Change the cluster hdfs write end point here. This is mandatory.
+falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://240.0.0.10:8020
+# Change the cluster validity start time here
+falcon.recipe.cluster.validity.start=2015-03-13T00:00Z
+# Change the cluster validity end time here
+falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
+
+##### Scheduling properties
+# Change the recipe frequency here. Valid frequency types are minutes, hours, days, months
+falcon.recipe.process.frequency=minutes(5)
+
+##### Tag properties - an optional list of comma-separated tags (key=value pairs)
+##### Uncomment to add tags
+#falcon.recipe.tags=
+
+##### Retry policy properties
+
+falcon.recipe.retry.policy=periodic
+falcon.recipe.retry.delay=minutes(30)
+falcon.recipe.retry.attempts=3
+
+##### ACL properties - Uncomment and change ACL if authorization is enabled
+
+falcon.recipe.acl.owner=ambari-qa
+falcon.recipe.acl.group=users
+falcon.recipe.acl.permission=0x755
+
+##### Custom Job properties
+
+drSourceDir=/user/falcon_qa/dr/test/primaryCluster/input
+drSourceClusterFS=hdfs://240.0.0.10:8020
+drTargetDir=/user/falcon_qa/dr/test/backupCluster/input
+drTargetClusterFS=hdfs://240.0.0.11:8020
+
+# Change it to specify the maximum number of mappers for DistCP
+distcpMaxMaps=1
+# Change it to specify the bandwidth in MB for each mapper in DistCP
+distcpMapBandwidth=100
+
+##### Email on failure
+drNotificationReceivers=NA
\ No newline at end of file
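
Note (not part of the patch): the MirrorTest class above consumes this recipe directory through
RecipeMerlin and fills in the placeholder values programmatically. A minimal sketch using only
the calls that already appear in that test; srcCluster, tgtCluster and the directory paths are
placeholders, not values from this patch:

    // Read the HDFS replication recipe shipped with the test resources and
    // bind it to concrete clusters, frequency, validity and directories.
    RecipeMerlin hdfsRecipe = RecipeMerlin.readFromDir("HdfsRecipe",
        FalconCLI.RecipeOperation.HDFS_REPLICATION)
        .withRecipeCluster(srcCluster);
    hdfsRecipe.withSourceCluster(srcCluster)
        .withTargetCluster(tgtCluster)
        .withFrequency(new Frequency("5", Frequency.TimeUnit.minutes))
        .withValidity(TimeUtil.getTimeWrtSystemTime(-5), TimeUtil.getTimeWrtSystemTime(5));
    hdfsRecipe.withSourceDir("/path/on/source").withTargetDir("/path/on/target");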

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-template.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-template.xml b/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-template.xml
new file mode 100644
index 0000000..3afbef0
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-template.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##name##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##cluster.name##">
+            <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
+        </cluster>
+    </clusters>
+
+    <tags>_falcon_mirroring_type=HIVE</tags>
+
+    <parallel>1</parallel>
+    <!-- Replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##process.frequency##</frequency>
+    <timezone>UTC</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##workflow.name##" engine="oozie"
+              path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
+    <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
+    <ACL/>
+</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-workflow.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-workflow.xml b/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-workflow.xml
new file mode 100644
index 0000000..c441998
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery-workflow.xml
@@ -0,0 +1,293 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
+    <start to='last-event'/>
+    <action name="last-event">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-drJobName</arg>
+            <arg>${drJobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>lastevents</arg>
+        </java>
+        <ok to="export-dr-replication"/>
+        <error to="failure"/>
+    </action>
+    <!-- Export Replication action -->
+    <action name="export-dr-replication">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-replicationMaxMaps</arg>
+            <arg>${replicationMaxMaps}</arg>
+            <arg>-distcpMaxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-distcpMapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-drJobName</arg>
+            <arg>${drJobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>export</arg>
+        </java>
+        <ok to="import-dr-replication"/>
+        <error to="failure"/>
+    </action>
+    <!-- Import Replication action -->
+    <action name="import-dr-replication">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-replicationMaxMaps</arg>
+            <arg>${replicationMaxMaps}</arg>
+            <arg>-distcpMaxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-distcpMapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-drJobName</arg>
+            <arg>${drJobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>import</arg>
+        </java>
+        <ok to="success"/>
+        <error to="failure"/>
+    </action>
+    <decision name="success">
+        <switch>
+            <case to="successAlert">
+                ${drNotificationReceivers ne 'NA'}
+            </case>
+            <default to="end"/>
+        </switch>
+    </decision>
+    <decision name="failure">
+        <switch>
+            <case to="failureAlert">
+                ${drNotificationReceivers ne 'NA'}
+            </case>
+            <default to="fail"/>
+        </switch>
+    </decision>
+    <action name="successAlert">
+        <email xmlns="uri:oozie:email-action:0.2">
+            <to>${drNotificationReceivers}</to>
+            <subject>INFO: Hive DR workflow ${drJobName} completed successfully</subject>
+            <body>
+                The Hive DR workflow ${wf:id()} is successful.
+                Source          = ${sourceCluster}
+                Target          = ${targetCluster}
+                DB Name         = ${sourceDatabase}
+                Table Name      = ${sourceTable}
+            </body>
+        </email>
+        <ok to="end"/>
+        <error to="end"/>
+    </action>
+    <action name="failureAlert">
+        <email xmlns="uri:oozie:email-action:0.2">
+            <to>${drNotificationReceivers}</to>
+            <subject>ERROR: Hive DR workflow ${drJobName} failed</subject>
+            <body>
+                The Hive DR workflow ${wf:id()} had issues and was killed.  The error message is: ${wf:errorMessage(wf:lastErrorNode())}
+                Source          = ${sourceCluster}
+                Target          = ${targetCluster}
+                DB Name         = ${sourceDatabase}
+                Table Name      = ${sourceTable}
+            </body>
+        </email>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>
+            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
+        </message>
+    </kill>
+    <end name="end"/>
+</workflow-app>
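
The workflow above chains three HiveDRTool java actions (last-event, export-dr-replication, import-dr-replication) that pass the same source/target arguments and differ only in the -executionStage value. A minimal sketch of that pattern in plain Java is shown below; the class name and all parameter values are illustrative placeholders, not part of this patch or of the HiveDRTool API.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    /** Illustrative only: builds the shared HiveDRTool argument list once per stage. */
    public class HiveDrStageArgsSketch {

        // Values below are placeholders; the real workflow resolves them from Oozie job properties.
        static List<String> argsFor(String stage) {
            List<String> args = new ArrayList<>(Arrays.asList(
                    "-sourceCluster", "primaryCluster",
                    "-sourceMetastoreUri", "thrift://source-host:9083",
                    "-targetCluster", "backupCluster",
                    "-targetMetastoreUri", "thrift://target-host:9083",
                    "-maxEvents", "-1"));
            args.addAll(Arrays.asList("-executionStage", stage));
            return args;
        }

        public static void main(String[] ignored) {
            // Mirrors the action chain: last-event -> export -> import.
            for (String stage : new String[]{"lastevents", "export", "import"}) {
                System.out.println(stage + ": " + argsFor(stage));
            }
        }
    }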

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery.properties
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery.properties b/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery.properties
new file mode 100644
index 0000000..de7f7f9
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HiveDrRecipe/hive-disaster-recovery.properties
@@ -0,0 +1,95 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##### NOTE: This is a TEMPLATE file which can be copied and edited
+
+##### Recipe properties
+falcon.recipe.name=hive-disaster-recovery
+
+
+##### Workflow properties
+falcon.recipe.workflow.name=hive-dr-workflow
+# Provide the absolute workflow path. This can be an HDFS or local FS path; if the workflow is on the local FS it will be copied to HDFS
+falcon.recipe.workflow.path=/recipes/hive-replication/hive-disaster-recovery-workflow.xml
+
+##### Cluster properties
+
+# Change the name of the cluster where the replication job should run here
+falcon.recipe.cluster.name=backupCluster
+# Change the cluster HDFS write endpoint here. This is mandatory.
+falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://localhost:8020
+# Change the cluster validity start time here
+falcon.recipe.cluster.validity.start=2014-10-01T00:00Z
+# Change the cluster validity end time here
+falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
+
+##### Scheduling properties
+
+# Change the process frequency here. Valid frequency types are minutes, hours, days and months
+falcon.recipe.process.frequency=minutes(60)
+
+##### Retry policy properties
+
+falcon.recipe.retry.policy=periodic
+falcon.recipe.retry.delay=minutes(30)
+falcon.recipe.retry.attempts=3
+
+##### Tag properties - An optional comma separated list of key=value pairs
+##### Uncomment to add tags
+#falcon.recipe.tags=owner=landing,pipeline=adtech
+
+##### ACL properties - Uncomment and change ACL if authorization is enabled
+
+#falcon.recipe.acl.owner=testuser
+#falcon.recipe.acl.group=group
+#falcon.recipe.acl.permission=0x755
+
+##### Custom Job properties
+
+##### Source Cluster DR properties
+sourceCluster=primaryCluster
+sourceMetastoreUri=thrift://localhost:9083
+sourceHiveServer2Uri=hive2://localhost:10000
+# For DB level replication, to replicate multiple databases specify a comma separated list of databases
+sourceDatabase=default
+# For DB level replication specify * for sourceTable.
+# For table level replication, to replicate multiple tables specify a comma separated list of tables
+sourceTable=testtable_dr
+sourceStagingPath=/apps/hive/tools/dr
+sourceNN=hdfs://localhost:8020
+
+##### Target Cluster DR properties
+targetCluster=backupCluster
+targetMetastoreUri=thrift://localhost:9083
+targetHiveServer2Uri=hive2://localhost:10000
+targetStagingPath=/apps/hive/tools/dr
+targetNN=hdfs://localhost:8020
+
+# Caps the maximum number of events processed each time the job runs. Set it based on your bandwidth limit.
+# Setting it to -1 processes all pending events but can saturate the bandwidth, so use it judiciously.
+maxEvents=-1
+# Change it to specify the maximum number of mappers for replication
+replicationMaxMaps=5
+# Change it to specify the maximum number of mappers for DistCP
+distcpMaxMaps=1
+# Change it to specify the bandwidth in MB for each mapper in DistCP
+distcpMapBandwidth=100
+
+##### Email notification receivers - comma separated list (NA disables alerts)
+drNotificationReceivers=NA
+
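
The properties file above is a template (note the TEMPLATE comment): before a run it is typically copied and the cluster-specific endpoints overwritten. A minimal sketch of that substitution step using java.util.Properties follows; the file names and the overridden host names are assumptions for illustration, not the regression suite's actual helper code.

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.util.Properties;

    /** Illustrative only: copies the recipe template and overrides cluster endpoints. */
    public class RecipePropsOverrideSketch {
        public static void main(String[] args) throws IOException {
            Properties props = new Properties();
            // Load the template (path is a placeholder).
            try (FileInputStream in = new FileInputStream("hive-disaster-recovery.properties")) {
                props.load(in);
            }
            // Point the recipe at the clusters under test (host names are placeholders).
            props.setProperty("sourceMetastoreUri", "thrift://source-host:9083");
            props.setProperty("sourceNN", "hdfs://source-host:8020");
            props.setProperty("targetMetastoreUri", "thrift://target-host:9083");
            props.setProperty("targetNN", "hdfs://target-host:8020");
            // Write the resolved copy next to the template.
            try (FileOutputStream out = new FileOutputStream("hive-disaster-recovery.resolved.properties")) {
                props.store(out, "Resolved HiveDR recipe properties");
            }
        }
    }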

http://git-wip-us.apache.org/repos/asf/falcon/blob/f9669000/falcon-regression/merlin/src/test/resources/HiveDrSecureRecipe/hive-disaster-recovery-secure-template.xml
----------------------------------------------------------------------
diff --git a/falcon-regression/merlin/src/test/resources/HiveDrSecureRecipe/hive-disaster-recovery-secure-template.xml b/falcon-regression/merlin/src/test/resources/HiveDrSecureRecipe/hive-disaster-recovery-secure-template.xml
new file mode 100644
index 0000000..3afbef0
--- /dev/null
+++ b/falcon-regression/merlin/src/test/resources/HiveDrSecureRecipe/hive-disaster-recovery-secure-template.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##name##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##cluster.name##">
+            <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
+        </cluster>
+    </clusters>
+
+    <tags>_falcon_mirroring_type=HIVE</tags>
+
+    <parallel>1</parallel>
+    <!-- Replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##process.frequency##</frequency>
+    <timezone>UTC</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##workflow.name##" engine="oozie"
+              path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
+    <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
+    <ACL/>
+</process>