Posted to commits@falcon.apache.org by ba...@apache.org on 2016/04/13 01:05:55 UTC

[1/3] falcon git commit: FALCON-1107 Move trusted extensions processing to server side

Repository: falcon
Updated Branches:
  refs/heads/master c52961c6a -> 95bf312f4


http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java
new file mode 100644
index 0000000..49b3a12
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtension.java
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.mirroring.hive;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.catalog.AbstractCatalogService;
+import org.apache.falcon.catalog.CatalogServiceFactory;
+import org.apache.falcon.entity.ClusterHelper;
+import org.apache.falcon.entity.v0.cluster.Cluster;
+import org.apache.falcon.extensions.AbstractExtension;
+import org.apache.falcon.extensions.ExtensionProperties;
+import org.apache.falcon.security.SecurityUtil;
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.Properties;
+
+/**
+ * Hive mirroring extension.
+ */
+public class HiveMirroringExtension extends AbstractExtension {
+    private static final String EXTENSION_NAME = "HIVE-MIRRORING";
+    private static final String ALL_TABLES = "*";
+    private static final String COMMA_DELIMITER = ",";
+    private static final String SECURE_RESOURCE = "-secure";
+
+    @Override
+    public String getName() {
+        return EXTENSION_NAME;
+    }
+
+    @Override
+    public void validate(final Properties extensionProperties) throws FalconException {
+        for (HiveMirroringExtensionProperties property : HiveMirroringExtensionProperties.values()) {
+            if (extensionProperties.getProperty(property.getName()) == null && property.isRequired()) {
+                throw new FalconException("Missing extension property: " + property.getName());
+            }
+        }
+
+        String srcClusterName = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.SOURCE_CLUSTER.getName());
+        Cluster srcCluster = ClusterHelper.getCluster(srcClusterName);
+        if (srcCluster == null) {
+            throw new FalconException("Cluster entity " + srcClusterName + " not found");
+        }
+        String srcClusterCatalogUrl = ClusterHelper.getRegistryEndPoint(srcCluster);
+        Configuration srcClusterConf = ClusterHelper.getConfiguration(srcCluster);
+
+        // Validate if DB exists - source and target
+        String sourceDbList = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.SOURCE_DATABASES.getName());
+
+        if (StringUtils.isBlank(sourceDbList)) {
+            throw new FalconException("No source DB specified for Hive mirroring");
+        }
+
+        AbstractCatalogService catalogService = CatalogServiceFactory.getCatalogService();
+        String[] srcDbs = sourceDbList.split(COMMA_DELIMITER);
+        if (srcDbs.length <= 0) {
+            throw new FalconException("No source DB specified for Hive mirroring");
+        }
+        for (String db : srcDbs) {
+            if (!catalogService.dbExists(srcClusterConf, srcClusterCatalogUrl, db)) {
+                throw new FalconException("Database " + db + " doesn't exist on cluster" + srcCluster.getName());
+            }
+        }
+
+        String sourceTableList = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.SOURCE_TABLES.getName());
+        if (StringUtils.isNotBlank(sourceTableList)) {
+            if (!sourceTableList.equals(ALL_TABLES)) {
+                String db = srcDbs[0];
+                String[] srcTables = sourceTableList.split(COMMA_DELIMITER);
+                for (String table : srcTables) {
+                    if (!catalogService.tableExists(srcClusterConf, srcClusterCatalogUrl, db, table)) {
+                        throw new FalconException("Table " + table + " doesn't exist on cluster"
+                                + srcCluster.getName());
+                    }
+                }
+            }
+        }
+
+        // Verify db exists on target
+        String targetClusterName = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.TARGET_CLUSTER.getName());
+        Cluster targetCluster = ClusterHelper.getCluster(targetClusterName);
+        if (targetCluster == null) {
+            throw new FalconException("Cluster entity " + targetClusterName + " not found");
+        }
+        String targetClusterCatalogUrl = ClusterHelper.getRegistryEndPoint(targetCluster);
+        Configuration targetClusterConf = ClusterHelper.getConfiguration(targetCluster);
+
+        for (String db : srcDbs) {
+            if (!catalogService.dbExists(targetClusterConf, targetClusterCatalogUrl, db)) {
+                throw new FalconException("Database " + db + " doesn't exist on cluster" + targetCluster.getName());
+            }
+        }
+    }
+
+    @Override
+    public Properties getAdditionalProperties(final Properties extensionProperties) throws FalconException {
+        Properties additionalProperties = new Properties();
+
+        String jobName = extensionProperties.getProperty(ExtensionProperties.JOB_NAME.getName());
+        // Add job name as Hive DR job
+        additionalProperties.put(HiveMirroringExtensionProperties.HIVE_MIRRORING_JOB_NAME.getName(),
+                jobName + System.currentTimeMillis());
+
+        // Add required properties of cluster where job should run
+        String jobClusterName = extensionProperties.getProperty(ExtensionProperties.CLUSTER_NAME.getName());
+        additionalProperties.put(HiveMirroringExtensionProperties.CLUSTER_FOR_JOB_RUN.getName(), jobClusterName);
+        Cluster jobCluster = ClusterHelper.getCluster(jobClusterName);
+        if (jobCluster == null) {
+            throw new FalconException("Cluster entity " + jobClusterName + " not found");
+        }
+        additionalProperties.put(HiveMirroringExtensionProperties.CLUSTER_FOR_JOB_RUN_WRITE_EP.getName(),
+                ClusterHelper.getStorageUrl(jobCluster));
+        if (SecurityUtil.isSecurityEnabled()) {
+            // Add -secure and update the resource name
+            String resourceName = getName().toLowerCase() + SECURE_RESOURCE;
+            additionalProperties.put(ExtensionProperties.RESOURCE_NAME.getName(), resourceName);
+            additionalProperties.put(HiveMirroringExtensionProperties.CLUSTER_FOR_JOB_NN_KERBEROS_PRINCIPAL.getName(),
+                    ClusterHelper.getPropertyValue(jobCluster, SecurityUtil.NN_PRINCIPAL));
+        }
+
+        // Properties for src cluster
+        String srcClusterName = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.SOURCE_CLUSTER.getName());
+        Cluster srcCluster = ClusterHelper.getCluster(srcClusterName);
+        if (srcCluster == null) {
+            throw new FalconException("Cluster entity " + srcClusterName + " not found");
+        }
+        additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_METASTORE_URI.getName(),
+                ClusterHelper.getRegistryEndPoint(srcCluster));
+        additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_NN.getName(),
+                ClusterHelper.getStorageUrl(srcCluster));
+
+        String sourceTableList = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.SOURCE_TABLES.getName());
+        if (StringUtils.isBlank(sourceTableList)) {
+            additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_TABLES.getName(), ALL_TABLES);
+        }
+
+        if (SecurityUtil.isSecurityEnabled()) {
+            String hive2Principal = extensionProperties.getProperty(HiveMirroringExtensionProperties
+                    .SOURCE_HIVE2_KERBEROS_PRINCIPAL.getName());
+            if (StringUtils.isBlank(hive2Principal)) {
+                throw new FalconException("Hive server2 kerberos principal for cluster " + srcCluster.getName()
+                        + "not passed for extension " + jobName);
+            }
+
+            additionalProperties.put(HiveMirroringExtensionProperties.SOURCE_NN_KERBEROS_PRINCIPAL.getName(),
+                    ClusterHelper.getPropertyValue(srcCluster, SecurityUtil.NN_PRINCIPAL));
+            additionalProperties.put(
+                    HiveMirroringExtensionProperties.SOURCE_HIVE_METASTORE_KERBEROS_PRINCIPAL.getName(),
+                    ClusterHelper.getPropertyValue(srcCluster, SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL));
+        }
+
+        // Properties for target cluster
+        String targetClusterName = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.TARGET_CLUSTER.getName());
+        Cluster targetCluster = ClusterHelper.getCluster(targetClusterName);
+        if (targetCluster == null) {
+            throw new FalconException("Cluster entity " + targetClusterName + " not found");
+        }
+        additionalProperties.put(HiveMirroringExtensionProperties.TARGET_METASTORE_URI.getName(),
+                ClusterHelper.getRegistryEndPoint(targetCluster));
+        additionalProperties.put(HiveMirroringExtensionProperties.TARGET_NN.getName(),
+                ClusterHelper.getStorageUrl(targetCluster));
+
+        if (SecurityUtil.isSecurityEnabled()) {
+            String hive2Principal = extensionProperties.getProperty(HiveMirroringExtensionProperties
+                    .TARGET_HIVE2_KERBEROS_PRINCIPAL.getName());
+            if (StringUtils.isBlank(hive2Principal)) {
+                throw new FalconException("Hive server2 kerberos principal for cluster " + targetCluster.getName()
+                        + "not passed for extension " + jobName);
+            }
+
+            additionalProperties.put(HiveMirroringExtensionProperties.TARGET_NN_KERBEROS_PRINCIPAL.getName(),
+                    ClusterHelper.getPropertyValue(targetCluster, SecurityUtil.NN_PRINCIPAL));
+            additionalProperties.put(
+                    HiveMirroringExtensionProperties.TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL.getName(),
+                    ClusterHelper.getPropertyValue(targetCluster, SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL));
+        }
+
+        // Misc properties
+        // Add default properties if not passed
+        String maxEvents = extensionProperties.getProperty(HiveMirroringExtensionProperties.MAX_EVENTS.getName());
+        if (StringUtils.isBlank(maxEvents)) {
+            additionalProperties.put(HiveMirroringExtensionProperties.MAX_EVENTS.getName(), "-1");
+        }
+
+        String replicationMaxMaps =
+                extensionProperties.getProperty(HiveMirroringExtensionProperties.MAX_MAPS.getName());
+        if (StringUtils.isBlank(replicationMaxMaps)) {
+            additionalProperties.put(HiveMirroringExtensionProperties.MAX_MAPS.getName(), "5");
+        }
+
+        String distcpMaxMaps = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.DISTCP_MAX_MAPS.getName());
+        if (StringUtils.isBlank(distcpMaxMaps)) {
+            additionalProperties.put(HiveMirroringExtensionProperties.DISTCP_MAX_MAPS.getName(), "1");
+        }
+
+        String distcpMapBandwidth = extensionProperties.getProperty(
+                HiveMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName());
+        if (StringUtils.isBlank(distcpMapBandwidth)) {
+            additionalProperties.put(HiveMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName(), "100");
+        }
+
+        if (StringUtils.isBlank(
+                extensionProperties.getProperty(HiveMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName()))) {
+            additionalProperties.put(HiveMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName(), "false");
+        }
+
+        return additionalProperties;
+    }
+}
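
For reference, a minimal sketch of how a caller might drive this extension. It assumes the source and target cluster entities are already registered with Falcon and that a catalog service is configured; the property keys come from HiveMirroringExtensionProperties (next file), while the job name, cluster names and HS2 URIs are hypothetical:

    import java.util.Properties;

    import org.apache.falcon.FalconException;
    import org.apache.falcon.extensions.ExtensionProperties;
    import org.apache.falcon.extensions.mirroring.hive.HiveMirroringExtension;
    import org.apache.falcon.extensions.mirroring.hive.HiveMirroringExtensionProperties;

    public final class HiveMirroringDriverSketch {
        public static void main(String[] args) throws FalconException {
            Properties props = new Properties();
            props.setProperty(ExtensionProperties.JOB_NAME.getName(), "hive-dr-daily");
            props.setProperty(ExtensionProperties.CLUSTER_NAME.getName(), "primaryCluster");
            props.setProperty(HiveMirroringExtensionProperties.SOURCE_CLUSTER.getName(), "primaryCluster");
            props.setProperty(HiveMirroringExtensionProperties.TARGET_CLUSTER.getName(), "backupCluster");
            props.setProperty(HiveMirroringExtensionProperties.SOURCE_DATABASES.getName(), "salesDb");
            props.setProperty(HiveMirroringExtensionProperties.SOURCE_HS2_URI.getName(), "hive2://primary:10000");
            props.setProperty(HiveMirroringExtensionProperties.TARGET_HS2_URI.getName(), "hive2://backup:10000");

            HiveMirroringExtension extension = new HiveMirroringExtension();
            // Throws FalconException on missing keys or unknown databases/tables
            extension.validate(props);
            // Fills in server-side defaults such as maxEvents=-1 and distcpMaxMaps=1
            extension.getAdditionalProperties(props).list(System.out);
        }
    }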

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java
new file mode 100644
index 0000000..6c4f58d
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hive/HiveMirroringExtensionProperties.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.mirroring.hive;
+
+/**
+ * Hive mirroring extension properties.
+ */
+public enum HiveMirroringExtensionProperties {
+    SOURCE_CLUSTER("sourceCluster", "Replication source cluster name"),
+    SOURCE_METASTORE_URI("sourceMetastoreUri", "Source Hive metastore uri", false),
+    SOURCE_HS2_URI("sourceHiveServer2Uri", "Source HS2 uri"),
+    SOURCE_DATABASES("sourceDatabases", "List of databases to replicate"),
+    SOURCE_TABLES("sourceTables", "List of tables to replicate", false),
+    SOURCE_STAGING_PATH("sourceStagingPath", "Location of source staging path", false),
+    SOURCE_NN("sourceNN", "Source name node", false),
+    SOURCE_NN_KERBEROS_PRINCIPAL("sourceNNKerberosPrincipal", "Source name node kerberos principal", false),
+    SOURCE_HIVE_METASTORE_KERBEROS_PRINCIPAL("sourceHiveMetastoreKerberosPrincipal",
+            "Source hive metastore kerberos principal", false),
+    SOURCE_HIVE2_KERBEROS_PRINCIPAL("sourceHive2KerberosPrincipal",
+            "Source hiveserver2 kerberos principal", false),
+
+    TARGET_CLUSTER("targetCluster", "Target cluster name"),
+    TARGET_METASTORE_URI("targetMetastoreUri", "Target Hive metastore uri", false),
+    TARGET_HS2_URI("targetHiveServer2Uri", "Target HS2 uri"),
+    TARGET_STAGING_PATH("targetStagingPath", "Location of target staging path", false),
+    TARGET_NN("targetNN", "Target name node", false),
+    TARGET_NN_KERBEROS_PRINCIPAL("targetNNKerberosPrincipal", "Target name node kerberos principal", false),
+    TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL("targetHiveMetastoreKerberosPrincipal",
+            "Target hive metastore kerberos principal", false),
+    TARGET_HIVE2_KERBEROS_PRINCIPAL("targetHive2KerberosPrincipal",
+            "Target hiveserver2 kerberos principal", false),
+
+    MAX_EVENTS("maxEvents", "Maximum events to replicate", false),
+    MAX_MAPS("replicationMaxMaps", "Maximum number of maps used during replication", false),
+    DISTCP_MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during distcp", false),
+    MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication"),
+    CLUSTER_FOR_JOB_RUN("clusterForJobRun", "Cluster on which replication job runs", false),
+    CLUSTER_FOR_JOB_NN_KERBEROS_PRINCIPAL("Job cluster kerberos principal",
+            "Write EP of cluster on which replication job runs", false),
+    CLUSTER_FOR_JOB_RUN_WRITE_EP("clusterForJobRunWriteEP", "Write EP of cluster on which replication job runs", false),
+    TDE_ENCRYPTION_ENABLED("tdeEncryptionEnabled", "Set to true if TDE encryption is enabled", false),
+    HIVE_MIRRORING_JOB_NAME("jobName", "Unique hive replication job name", false);
+
+    private final String name;
+    private final String description;
+    private final boolean isRequired;
+
+    HiveMirroringExtensionProperties(String name, String description) {
+        this(name, description, true);
+    }
+
+    HiveMirroringExtensionProperties(String name, String description, boolean isRequired) {
+        this.name = name;
+        this.description = description;
+        this.isRequired = isRequired;
+    }
+
+    public String getName() {
+        return this.name;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public boolean isRequired() {
+        return isRequired;
+    }
+
+    @Override
+    public String toString() {
+        return getName();
+    }
+}
+
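
Because each constant carries its own metadata, a short sketch like the following can derive a usage summary of the extension's properties (the class name is hypothetical):

    import org.apache.falcon.extensions.mirroring.hive.HiveMirroringExtensionProperties;

    public final class ListHiveMirroringProperties {
        public static void main(String[] args) {
            // Print name, required/optional flag and description for every property
            for (HiveMirroringExtensionProperties property : HiveMirroringExtensionProperties.values()) {
                System.out.printf("%-45s %-8s %s%n", property.getName(),
                        property.isRequired() ? "required" : "optional", property.getDescription());
            }
        }
    }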

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/store/ExtensionStore.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/store/ExtensionStore.java b/extensions/src/main/java/org/apache/falcon/extensions/store/ExtensionStore.java
new file mode 100644
index 0000000..9e07112
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/store/ExtensionStore.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.store;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.falcon.entity.store.StoreAccessException;
+import org.apache.falcon.hadoop.HadoopClientFactory;
+import org.apache.falcon.util.StartupProperties;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Store for Falcon extensions.
+ */
+public final class ExtensionStore {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ExtensionStore.class);
+    private FileSystem fs;
+
+    private Path storePath;
+
+    // Convention over configuration design paradigm
+    private static final String RESOURCES_DIR = "resources";
+    private static final String LIBS_DIR = "libs";
+
+    private static final String EXTENSION_STORE_URI = "extension.store.uri";
+
+    private static final ExtensionStore STORE = new ExtensionStore();
+
+    public static ExtensionStore get() {
+        return STORE;
+    }
+
+    private ExtensionStore() {
+        String uri = StartupProperties.get().getProperty(EXTENSION_STORE_URI);
+        if (StringUtils.isEmpty(uri)) {
+            throw new RuntimeException("Property extension.store.uri not set in startup properties."
+                    + "Please set it to path of extension deployment on HDFS. Extension store init failed");
+        }
+        storePath = new Path(uri);
+        fs = initializeFileSystem();
+    }
+
+    private FileSystem initializeFileSystem() {
+        try {
+            FileSystem fileSystem =
+                    HadoopClientFactory.get().createFalconFileSystem(storePath.toUri());
+            if (!fileSystem.exists(storePath)) {
+                LOG.info("Creating extension store directory: {}", storePath);
+                // set permissions so the extension store dir is owned by falcon alone
+                HadoopClientFactory.mkdirs(fileSystem, storePath, HadoopClientFactory.ALL_PERMISSION);
+            }
+
+            return fileSystem;
+        } catch (Exception e) {
+            throw new RuntimeException("Unable to bring up extension store for path: " + storePath, e);
+        }
+    }
+
+    public Map<String, String> getExtensionArtifacts(final String extensionName) throws StoreAccessException {
+        Map<String, String> extensionFileMap = new HashMap<>();
+        try {
+            Path extensionPath = new Path(storePath, extensionName.toLowerCase());
+            RemoteIterator<LocatedFileStatus> fileStatusListIterator = fs.listFiles(extensionPath, true);
+
+            if (!fileStatusListIterator.hasNext()) {
+                throw new StoreAccessException(new Exception(" For extension " + extensionName
+                        + " there are no artifacts at the extension store path " + storePath));
+            }
+            while (fileStatusListIterator.hasNext()) {
+                LocatedFileStatus fileStatus = fileStatusListIterator.next();
+                Path filePath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
+                extensionFileMap.put(filePath.getName(), filePath.toString());
+            }
+        } catch (IOException e) {
+            throw new StoreAccessException(e);
+        }
+        return extensionFileMap;
+    }
+
+    public Map<String, String> getExtensionResources(final String extensionName) throws StoreAccessException {
+        Map<String, String> extensionFileMap = new HashMap<>();
+        try {
+            Path extensionPath = new Path(storePath, extensionName.toLowerCase());
+
+            Path resourcesPath = null;
+            FileStatus[] files = fs.listStatus(extensionPath);
+
+            for (FileStatus fileStatus : files) {
+                if (fileStatus.getPath().getName().equalsIgnoreCase(RESOURCES_DIR)) {
+                    resourcesPath = fileStatus.getPath();
+                    break;
+                }
+            }
+
+            if (resourcesPath == null) {
+                throw new StoreAccessException(new Exception(" For extension " + extensionName
+                        + " there is no " + RESOURCES_DIR + "at the extension store path " + storePath));
+            }
+            RemoteIterator<LocatedFileStatus> fileStatusListIterator = fs.listFiles(resourcesPath, true);
+            while (fileStatusListIterator.hasNext()) {
+                LocatedFileStatus fileStatus = fileStatusListIterator.next();
+                Path filePath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
+                extensionFileMap.put(filePath.getName(), filePath.toString());
+            }
+        } catch (IOException e) {
+            throw new StoreAccessException(e);
+        }
+        return extensionFileMap;
+    }
+
+    public String getExtensionLibPath(final String extensionName) throws StoreAccessException {
+        try {
+            Path extensionPath = new Path(storePath, extensionName.toLowerCase());
+
+            Path libsPath = null;
+            FileStatus[] files = fs.listStatus(extensionPath);
+
+            for (FileStatus fileStatus : files) {
+                if (fileStatus.getPath().getName().equalsIgnoreCase(LIBS_DIR)) {
+                    if (fileStatus.getLen() != 0) {
+                        libsPath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
+                    }
+                    break;
+                }
+            }
+
+            if (libsPath == null) {
+                LOG.info("For extension " + extensionName + " there is no "
+                        + LIBS_DIR + "at the extension store path " + storePath);
+                return null;
+            } else {
+                return libsPath.toString();
+            }
+        } catch (IOException e) {
+            throw new StoreAccessException(e);
+        }
+    }
+
+    public String getExtensionResource(final String resourcePath) throws StoreAccessException {
+        if (StringUtils.isBlank(resourcePath)) {
+            throw new StoreAccessException(new Exception("Resource path cannot be null or empty"));
+        }
+
+        try {
+            Path resourceFile = new Path(resourcePath);
+
+            ByteArrayOutputStream writer = new ByteArrayOutputStream();
+            InputStream data = fs.open(resourceFile);
+            IOUtils.copyBytes(data, writer, fs.getConf(), true);
+            return writer.toString();
+        } catch (IOException e) {
+            throw new StoreAccessException(e);
+        }
+    }
+
+    public List<String> getExtensions() throws StoreAccessException {
+        List<String> extesnionList = new ArrayList<>();
+        try {
+            FileStatus[] fileStatuses = fs.listStatus(storePath);
+
+            for (FileStatus fileStatus : fileStatuses) {
+                if (fileStatus.isDirectory()) {
+                    Path filePath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
+                    extesnionList.add(filePath.getName());
+                }
+            }
+        } catch (IOException e) {
+            throw new StoreAccessException(e);
+        }
+        return extesnionList;
+    }
+
+    public Path getExtensionStorePath() {
+        return storePath;
+    }
+
+    public boolean isExtensionStoreInitialized() {
+        return (storePath != null);
+    }
+
+}
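
A minimal usage sketch, assuming extension.store.uri points at a deployed extension tree and the hdfs-mirroring extension is installed; the HDFS path in the comment is hypothetical:

    import java.util.Map;

    import org.apache.falcon.entity.store.StoreAccessException;
    import org.apache.falcon.extensions.store.ExtensionStore;

    public final class ExtensionStoreSketch {
        public static void main(String[] args) throws StoreAccessException {
            // Assumes startup.properties contains e.g.
            //   extension.store.uri=hdfs://nn:8020/apps/falcon/extensions   (hypothetical path)
            ExtensionStore store = ExtensionStore.get();
            System.out.println("Available extensions: " + store.getExtensions());

            Map<String, String> resources = store.getExtensionResources("hdfs-mirroring");
            for (Map.Entry<String, String> entry : resources.entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
            // May be null when the extension ships no libs directory
            System.out.println(store.getExtensionLibPath("hdfs-mirroring"));
        }
    }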

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/util/ExtensionProcessBuilderUtils.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/util/ExtensionProcessBuilderUtils.java b/extensions/src/main/java/org/apache/falcon/extensions/util/ExtensionProcessBuilderUtils.java
new file mode 100644
index 0000000..92e9805
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/util/ExtensionProcessBuilderUtils.java
@@ -0,0 +1,309 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.util;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.entity.v0.Entity;
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.entity.v0.Frequency;
+import org.apache.falcon.entity.v0.SchemaHelper;
+import org.apache.falcon.entity.v0.process.ACL;
+import org.apache.falcon.entity.v0.process.Cluster;
+import org.apache.falcon.entity.v0.process.EngineType;
+import org.apache.falcon.entity.v0.process.Notification;
+import org.apache.falcon.entity.v0.process.PolicyType;
+import org.apache.falcon.entity.v0.process.Property;
+import org.apache.falcon.entity.v0.process.Retry;
+import org.apache.falcon.entity.v0.process.Workflow;
+import org.apache.falcon.extensions.ExtensionProperties;
+import org.apache.falcon.security.SecurityUtil;
+import org.apache.falcon.util.NotificationType;
+
+import javax.xml.bind.Unmarshaller;
+import javax.xml.bind.ValidationEvent;
+import javax.xml.bind.ValidationEventHandler;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.TimeZone;
+
+
+/**
+ * Extension builder utility.
+ */
+public final class ExtensionProcessBuilderUtils {
+
+    private static final Pattern EXTENSION_VAR_PATTERN = Pattern.compile("##[A-Za-z0-9_.]*##");
+
+    private ExtensionProcessBuilderUtils() {
+    }
+
+    public static Entity createProcessFromTemplate(final String processTemplate,
+                                                         final String extensionName,
+                                                         final Properties extensionProperties,
+                                                         final String wfPath,
+                                                         final String wfLibPath) throws FalconException {
+        if (StringUtils.isBlank(processTemplate) || StringUtils.isBlank(extensionName)
+                || extensionProperties == null || StringUtils.isBlank(wfPath)) {
+            throw new FalconException("Invalid arguments passed to extension builder");
+        }
+        org.apache.falcon.entity.v0.process.Process process = bindAttributesInTemplate(
+                processTemplate, extensionProperties, extensionName, wfPath, wfLibPath);
+
+        validateGeneratedProcess(process.toString());
+        return process;
+    }
+
+    private static org.apache.falcon.entity.v0.process.Process
+    bindAttributesInTemplate(final String processTemplate, final Properties extensionProperties,
+                             final String extensionName, final String wfPath,
+                             final String wfLibPath)
+        throws FalconException {
+        if (StringUtils.isBlank(processTemplate) || extensionProperties == null) {
+            throw new FalconException("Process template or properties cannot be null");
+        }
+
+        org.apache.falcon.entity.v0.process.Process process;
+        try {
+            Unmarshaller unmarshaller = EntityType.PROCESS.getUnmarshaller();
+            // Validation can be skipped for unmarshalling as we only want to bind the template with the properties.
+            // Validation is handled as part of marshalling
+            unmarshaller.setSchema(null);
+            unmarshaller.setEventHandler(new ValidationEventHandler() {
+                    public boolean handleEvent(ValidationEvent validationEvent) {
+                        return true;
+                    }
+                }
+            );
+            process = (org.apache.falcon.entity.v0.process.Process)
+                    unmarshaller.unmarshal(new StringReader(processTemplate));
+        } catch (Exception e) {
+            throw new FalconException(e);
+        }
+
+        /* For optional properties the user might set them directly in the process xml rather than in the
+           properties file. Before submission, validation confirms that the process xml contains no unresolved
+           EXTENSION_VAR_PATTERN variables.
+        */
+
+        String processName = extensionProperties.getProperty(ExtensionProperties.JOB_NAME.getName());
+        if (StringUtils.isNotEmpty(processName)) {
+            process.setName(processName);
+        }
+
+        // DR process template has only one cluster
+        bindClusterProperties(process.getClusters().getClusters().get(0), extensionProperties);
+
+        // bind scheduling properties
+        String processFrequency = extensionProperties.getProperty(ExtensionProperties.FREQUENCY.getName());
+        if (StringUtils.isNotEmpty(processFrequency)) {
+            process.setFrequency(Frequency.fromString(processFrequency));
+        }
+
+        String zone = extensionProperties.getProperty(ExtensionProperties.TIMEZONE.getName());
+        if (StringUtils.isNotBlank(zone)) {
+            process.setTimezone(TimeZone.getTimeZone(zone));
+        } else {
+            process.setTimezone(TimeZone.getTimeZone("UTC"));
+        }
+
+        bindWorkflowProperties(process.getWorkflow(), extensionName, wfPath, wfLibPath);
+        bindRetryProperties(process.getRetry(), extensionProperties);
+        bindNotificationProperties(process.getNotification(), extensionProperties);
+        bindACLProperties(process.getACL(), extensionProperties);
+        bindTagsProperties(process, extensionProperties);
+        bindCustomProperties(process.getProperties(), extensionProperties);
+
+        return process;
+    }
+
+    private static void bindClusterProperties(final Cluster cluster,
+                                              final Properties extensionProperties) {
+        String clusterName = extensionProperties.getProperty(ExtensionProperties.CLUSTER_NAME.getName());
+        if (StringUtils.isNotEmpty(clusterName)) {
+            cluster.setName(clusterName);
+        }
+        String clusterStartValidity = extensionProperties.getProperty(ExtensionProperties.VALIDITY_START.getName());
+        if (StringUtils.isNotEmpty(clusterStartValidity)) {
+            cluster.getValidity().setStart(SchemaHelper.parseDateUTC(clusterStartValidity));
+        }
+
+        String clusterEndValidity = extensionProperties.getProperty(ExtensionProperties.VALIDITY_END.getName());
+        if (StringUtils.isNotEmpty(clusterEndValidity)) {
+            cluster.getValidity().setEnd(SchemaHelper.parseDateUTC(clusterEndValidity));
+        }
+    }
+
+    private static void bindWorkflowProperties(final Workflow wf,
+                                               final String extensionName,
+                                               final String wfPath,
+                                               final String wfLibPath) {
+        final EngineType defaultEngineType = EngineType.OOZIE;
+        final String workflowNameSuffix = "-workflow";
+
+        wf.setName(extensionName + workflowNameSuffix);
+        wf.setEngine(defaultEngineType);
+        wf.setPath(wfPath);
+        if (StringUtils.isNotEmpty(wfLibPath)) {
+            wf.setLib(wfLibPath);
+        } else {
+            wf.setLib("");
+        }
+    }
+
+    private static void bindRetryProperties(final Retry processRetry,
+                                            final Properties extensionProperties) {
+        final PolicyType defaultRetryPolicy = PolicyType.PERIODIC;
+        final int defaultRetryAttempts = 3;
+        final Frequency defaultRetryDelay = new Frequency("minutes(30)");
+
+        String retryPolicy = extensionProperties.getProperty(ExtensionProperties.RETRY_POLICY.getName());
+        if (StringUtils.isNotBlank(retryPolicy)) {
+            processRetry.setPolicy(PolicyType.fromValue(retryPolicy));
+        } else {
+            processRetry.setPolicy(defaultRetryPolicy);
+        }
+
+        String retryAttempts = extensionProperties.getProperty(ExtensionProperties.RETRY_ATTEMPTS.getName());
+        if (StringUtils.isNotBlank(retryAttempts)) {
+            processRetry.setAttempts(Integer.parseInt(retryAttempts));
+        } else {
+            processRetry.setAttempts(defaultRetryAttempts);
+        }
+
+        String retryDelay = extensionProperties.getProperty(ExtensionProperties.RETRY_DELAY.getName());
+        if (StringUtils.isNotBlank(retryDelay)) {
+            processRetry.setDelay(Frequency.fromString(retryDelay));
+        } else {
+            processRetry.setDelay(defaultRetryDelay);
+        }
+
+        String retryOnTimeout = extensionProperties.getProperty(ExtensionProperties.RETRY_ON_TIMEOUT.getName());
+        if (StringUtils.isNotBlank(retryOnTimeout)) {
+            processRetry.setOnTimeout(Boolean.valueOf(retryOnTimeout));
+        } else {
+            processRetry.setOnTimeout(false);
+        }
+    }
+
+    private static void bindNotificationProperties(final Notification processNotification,
+                                                   final Properties extensionProperties) {
+        final String defaultNotificationType = NotificationType.EMAIL.getName();
+
+        String notificationType = extensionProperties.getProperty(
+                ExtensionProperties.JOB_NOTIFICATION_TYPE.getName());
+        if (StringUtils.isNotBlank(notificationType)) {
+            processNotification.setType(notificationType);
+        } else {
+            processNotification.setType(defaultNotificationType);
+        }
+
+        String notificationAddress = extensionProperties.getProperty(
+                ExtensionProperties.JOB_NOTIFICATION_ADDRESS.getName());
+        if (StringUtils.isNotBlank(notificationAddress)) {
+            processNotification.setTo(notificationAddress);
+        } else {
+            processNotification.setTo("NA");
+        }
+    }
+
+    private static void bindACLProperties(final ACL acl,
+                                          final Properties extensionProperties) throws FalconException {
+        if (!SecurityUtil.isAuthorizationEnabled()) {
+            return;
+        }
+
+        String aclowner = extensionProperties.getProperty(ExtensionProperties.JOB_ACL_OWNER.getName());
+        if (StringUtils.isNotEmpty(aclowner)) {
+            acl.setOwner(aclowner);
+        } else {
+            throw new FalconException("ACL owner extension property cannot be null or empty when authorization is "
+                    + "enabled");
+        }
+
+        String aclGroup = extensionProperties.getProperty(ExtensionProperties.JOB_ACL_GROUP.getName());
+        if (StringUtils.isNotEmpty(aclGroup)) {
+            acl.setGroup(aclGroup);
+        } else {
+            throw new FalconException("ACL group extension property cannot be null or empty when authorization is "
+                    + "enabled");
+        }
+
+        String aclPermission = extensionProperties.getProperty(ExtensionProperties.JOB_ACL_PERMISSION.getName());
+        if (StringUtils.isNotEmpty(aclPermission)) {
+            acl.setPermission(aclPermission);
+        } else {
+            throw new FalconException("ACL permission extension property cannot be null or empty when authorization is "
+                    + "enabled");
+        }
+    }
+
+    private static void bindTagsProperties(final org.apache.falcon.entity.v0.process.Process process,
+                                           final Properties extensionProperties) {
+        String falconSystemTags = process.getTags();
+        String tags = extensionProperties.getProperty(ExtensionProperties.JOB_TAGS.getName());
+        if (StringUtils.isNotEmpty(tags)) {
+            if (StringUtils.isNotEmpty(falconSystemTags)) {
+                tags += ", " + falconSystemTags;
+            }
+            process.setTags(tags);
+        }
+    }
+
+
+    private static void bindCustomProperties(final org.apache.falcon.entity.v0.process.Properties customProperties,
+                                             final Properties extensionProperties) {
+        List<Property> propertyList = new ArrayList<>();
+
+        for (Map.Entry<Object, Object> extensionProperty : extensionProperties.entrySet()) {
+            if (ExtensionProperties.getOptionsMap().get(extensionProperty.getKey().toString()) == null) {
+                addProperty(propertyList, (String) extensionProperty.getKey(), (String) extensionProperty.getValue());
+            }
+        }
+
+        customProperties.getProperties().addAll(propertyList);
+    }
+
+    private static void addProperty(List<Property> propertyList, String name, String value) {
+        Property prop = new Property();
+        prop.setName(name);
+        prop.setValue(value);
+        propertyList.add(prop);
+    }
+
+    private static void validateGeneratedProcess(final String generatedProcess) throws FalconException {
+        if (StringUtils.isBlank(generatedProcess)) {
+            throw new IllegalArgumentException("Invalid arguments passed");
+        }
+
+        Matcher matcher = EXTENSION_VAR_PATTERN.matcher(generatedProcess);
+        if (matcher.find()) {
+            String variable = matcher.group();
+            throw new FalconException("No value found for the template variable " + variable
+                    + " in the extension template file. Please add it to the extension properties file");
+        }
+    }
+
+}
+
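
Tying the store and the builder together, a rough sketch of turning a template fetched from the store into a process entity. It assumes the hdfs-mirroring artifacts are deployed at the store path and follows the <extension>-template.xml / <extension>-workflow.xml naming convention used by the tests below; in practice the full extension property set must be supplied so that no ##...## variables remain unbound:

    import java.util.Map;
    import java.util.Properties;

    import org.apache.falcon.FalconException;
    import org.apache.falcon.entity.v0.Entity;
    import org.apache.falcon.extensions.ExtensionProperties;
    import org.apache.falcon.extensions.store.ExtensionStore;
    import org.apache.falcon.extensions.util.ExtensionProcessBuilderUtils;

    public final class BuildProcessSketch {
        public static void main(String[] args) throws FalconException {
            ExtensionStore store = ExtensionStore.get();
            Map<String, String> resources = store.getExtensionResources("hdfs-mirroring");
            String template = store.getExtensionResource(resources.get("hdfs-mirroring-template.xml"));

            Properties props = new Properties();
            props.setProperty(ExtensionProperties.JOB_NAME.getName(), "hdfs-mirroring-monthly");
            props.setProperty(ExtensionProperties.CLUSTER_NAME.getName(), "primaryCluster");
            // ... plus the remaining extension properties, so no ##...## variables are left unbound

            Entity process = ExtensionProcessBuilderUtils.createProcessFromTemplate(template,
                    "hdfs-mirroring", props,
                    resources.get("hdfs-mirroring-workflow.xml"),
                    store.getExtensionLibPath("hdfs-mirroring"));
            System.out.println(process);
        }
    }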

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/java/org/apache/falcon/extensions/ExtensionServiceTest.java
----------------------------------------------------------------------
diff --git a/extensions/src/test/java/org/apache/falcon/extensions/ExtensionServiceTest.java b/extensions/src/test/java/org/apache/falcon/extensions/ExtensionServiceTest.java
new file mode 100644
index 0000000..c8df2c0
--- /dev/null
+++ b/extensions/src/test/java/org/apache/falcon/extensions/ExtensionServiceTest.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import org.testng.Assert;
+
+/**
+ * Unit tests for ExtensionService.
+ */
+public class ExtensionServiceTest {
+
+    private ExtensionService service;
+
+    @BeforeClass
+    public void setUp() throws Exception {
+        service = new ExtensionService();
+        service.init();
+    }
+
+    @AfterClass
+    public void tearDown() throws Exception {
+        service.destroy();
+    }
+
+    @Test
+    public void testGetName() throws Exception {
+        Assert.assertEquals(service.getName(), ExtensionService.SERVICE_NAME);
+    }
+
+    @Test
+    public void testGetExtensionStore() throws Exception {
+        Assert.assertNotNull(ExtensionService.getExtensionStore());
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java
----------------------------------------------------------------------
diff --git a/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java b/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java
new file mode 100644
index 0000000..ffd9336
--- /dev/null
+++ b/extensions/src/test/java/org/apache/falcon/extensions/ExtensionTest.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.apache.falcon.FalconException;
+import org.apache.falcon.entity.ClusterHelper;
+import org.apache.falcon.entity.EntityUtil;
+import org.apache.falcon.entity.parser.EntityParserFactory;
+import org.apache.falcon.entity.parser.ProcessEntityParser;
+import org.apache.falcon.entity.store.ConfigurationStore;
+import org.apache.falcon.entity.v0.Entity;
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.entity.v0.SchemaHelper;
+import org.apache.falcon.entity.v0.cluster.Cluster;
+import org.apache.falcon.entity.v0.process.EngineType;
+import org.apache.falcon.entity.v0.process.PolicyType;
+import org.apache.falcon.entity.v0.process.Process;
+import org.apache.falcon.extensions.mirroring.hdfs.HdfsMirroringExtension;
+import org.apache.falcon.extensions.mirroring.hdfs.HdfsMirroringExtensionProperties;
+import org.apache.falcon.extensions.store.AbstractTestExtensionStore;
+import org.testng.Assert;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Tests for Extension.
+ */
+public class ExtensionTest extends AbstractTestExtensionStore {
+    private static final String PRIMARY_CLUSTER_XML = "/primary-cluster-0.1.xml";
+    private static final String BACKUP_CLUSTER_XML = "/backup-cluster-0.1.xml";
+    private static final String JOB_NAME = "hdfs-mirroring-monthly";
+    private static final String JOB_CLUSTER_NAME = "primaryCluster";
+    private static final String VALIDITY_START = "2016-01-02T00:00Z";
+    private static final String VALIDITY_END = "2018-01-02T00:00Z";
+    private static final String FREQUENCY = "days(1)";
+    private static final String SOURCEDIR = "/users/source/file1";
+    private static final String SOURCE_CLUSTER = "primaryCluster";
+    private static final String TARGETDIR = "/users/target/file1";
+    private static final String TARGET_CLUSTER = "backupCluster";
+    private Extension extension;
+
+    private static Properties getHdfsProperties() {
+        Properties properties = new Properties();
+        properties.setProperty(ExtensionProperties.JOB_NAME.getName(),
+                JOB_NAME);
+        properties.setProperty(ExtensionProperties.CLUSTER_NAME.getName(),
+                JOB_CLUSTER_NAME);
+        properties.setProperty(ExtensionProperties.VALIDITY_START.getName(),
+                VALIDITY_START);
+        properties.setProperty(ExtensionProperties.VALIDITY_END.getName(),
+                VALIDITY_END);
+        properties.setProperty(ExtensionProperties.FREQUENCY.getName(),
+                FREQUENCY);
+        properties.setProperty(HdfsMirroringExtensionProperties.SOURCE_DIR.getName(),
+                SOURCEDIR);
+        properties.setProperty(HdfsMirroringExtensionProperties.SOURCE_CLUSTER.getName(),
+                SOURCE_CLUSTER);
+        properties.setProperty(HdfsMirroringExtensionProperties.TARGET_DIR.getName(),
+                TARGETDIR);
+        properties.setProperty(HdfsMirroringExtensionProperties.TARGET_CLUSTER.getName(),
+                TARGET_CLUSTER);
+
+        return properties;
+    }
+
+    @BeforeClass
+    public void init() throws Exception {
+        extension = new Extension();
+        initClusters();
+    }
+
+    private void initClusters() throws Exception {
+        InputStream inputStream = getClass().getResourceAsStream(PRIMARY_CLUSTER_XML);
+        Cluster primaryCluster = (Cluster) EntityType.CLUSTER.getUnmarshaller().unmarshal(inputStream);
+        ConfigurationStore.get().publish(EntityType.CLUSTER, primaryCluster);
+
+        inputStream = getClass().getResourceAsStream(BACKUP_CLUSTER_XML);
+        Cluster backupCluster = (Cluster) EntityType.CLUSTER.getUnmarshaller().unmarshal(inputStream);
+        ConfigurationStore.get().publish(EntityType.CLUSTER, backupCluster);
+    }
+
+    @Test
+    public void testGetExtensionEntitiesForHdfsMirroring() throws FalconException {
+        ProcessEntityParser parser = (ProcessEntityParser) EntityParserFactory.getParser(EntityType.PROCESS);
+
+        List<Entity> entities = extension.getEntities(new HdfsMirroringExtension().getName(), getHdfsProperties());
+        if (entities == null || entities.isEmpty()) {
+            Assert.fail("Entities returned cannot be null or empty");
+        }
+
+        Assert.assertEquals(1, entities.size());
+        Entity entity = entities.get(0);
+
+        Assert.assertEquals(EntityType.PROCESS, entity.getEntityType());
+        parser.parse(new ByteArrayInputStream(entity.toString().getBytes()));
+
+        // Validate
+        Process processEntity = (Process) entity;
+        Assert.assertEquals(JOB_NAME, processEntity.getName());
+        org.apache.falcon.entity.v0.process.Cluster jobCluster = processEntity.getClusters().
+                getClusters().get(0);
+        Assert.assertEquals(JOB_CLUSTER_NAME, jobCluster.getName());
+        Assert.assertEquals(VALIDITY_START, SchemaHelper.formatDateUTC(jobCluster.getValidity().getStart()));
+        Assert.assertEquals(VALIDITY_END, SchemaHelper.formatDateUTC(jobCluster.getValidity().getEnd()));
+
+        Assert.assertEquals(FREQUENCY, processEntity.getFrequency().toString());
+        Assert.assertEquals("UTC", processEntity.getTimezone().getID());
+
+        Assert.assertEquals(EngineType.OOZIE, processEntity.getWorkflow().getEngine());
+        Assert.assertEquals(extensionStorePath + "/hdfs-mirroring/libs",
+                processEntity.getWorkflow().getLib());
+        Assert.assertEquals(extensionStorePath
+                + "/hdfs-mirroring/resources/runtime/hdfs-mirroring-workflow.xml",
+                processEntity.getWorkflow().getPath());
+
+        Properties props = EntityUtil.getEntityProperties(processEntity);
+
+        String srcClusterEndPoint = ClusterHelper.getReadOnlyStorageUrl(ClusterHelper.getCluster(SOURCE_CLUSTER));
+        Assert.assertEquals(srcClusterEndPoint + SOURCEDIR, props.getProperty("sourceDir"));
+        Assert.assertEquals(SOURCE_CLUSTER, props.getProperty("sourceCluster"));
+        Assert.assertEquals(TARGETDIR, props.getProperty("targetDir"));
+        Assert.assertEquals(TARGET_CLUSTER, props.getProperty("targetCluster"));
+
+        //retry
+        Assert.assertEquals(3, processEntity.getRetry().getAttempts());
+        Assert.assertEquals(PolicyType.PERIODIC, processEntity.getRetry().getPolicy());
+        Assert.assertEquals("minutes(30)", processEntity.getRetry().getDelay().toString());
+    }
+
+    @Test(expectedExceptions = FalconException.class,
+            expectedExceptionsMessageRegExp = "Missing extension property: jobClusterName")
+    public void testGetExtensionEntitiesForHdfsMirroringMissingMandatoryProperties() throws FalconException {
+        Properties props = getHdfsProperties();
+        props.remove(ExtensionProperties.CLUSTER_NAME.getName());
+
+        extension.getEntities(new HdfsMirroringExtension().getName(), props);
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/java/org/apache/falcon/extensions/store/AbstractTestExtensionStore.java
----------------------------------------------------------------------
diff --git a/extensions/src/test/java/org/apache/falcon/extensions/store/AbstractTestExtensionStore.java b/extensions/src/test/java/org/apache/falcon/extensions/store/AbstractTestExtensionStore.java
new file mode 100644
index 0000000..b62b475
--- /dev/null
+++ b/extensions/src/test/java/org/apache/falcon/extensions/store/AbstractTestExtensionStore.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.store;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.falcon.extensions.AbstractExtension;
+import org.apache.falcon.extensions.ExtensionService;
+import org.apache.falcon.hadoop.HadoopClientFactory;
+import org.apache.falcon.util.StartupProperties;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+/**
+ * Abstract class to set up the extension store.
+ */
+public class AbstractTestExtensionStore {
+
+    protected String extensionStorePath;
+    protected ExtensionStore store;
+    private FileSystem fileSystem;
+
+    @BeforeClass
+    public void initConfigStore() throws Exception {
+        String configPath = new URI(StartupProperties.get().getProperty("extension.store.uri")).getPath();
+        extensionStorePath = configPath + "-" + getClass().getName();
+        StartupProperties.get().setProperty("extension.store.uri", extensionStorePath);
+        new ExtensionService().init();
+        store = ExtensionService.getExtensionStore();
+        fileSystem = HadoopClientFactory.get().createFalconFileSystem(new Configuration(true));
+
+        extensionStoreSetup();
+    }
+
+    private void extensionStoreSetup() throws IOException {
+        List<AbstractExtension> extensions = AbstractExtension.getExtensions();
+        for (AbstractExtension extension : extensions) {
+            String extensionName = extension.getName().toLowerCase();
+            Path extensionPath = new Path(extensionStorePath, extensionName);
+            Path libPath = new Path(extensionPath, "libs");
+            Path resourcesPath = new Path(extensionPath, "resources");
+            HadoopClientFactory.mkdirs(fileSystem, extensionPath,
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+            HadoopClientFactory.mkdirs(fileSystem, new Path(extensionPath, new Path("README")),
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+
+            HadoopClientFactory.mkdirs(fileSystem, libPath,
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+            HadoopClientFactory.mkdirs(fileSystem, new Path(libPath, "build"),
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+            HadoopClientFactory.mkdirs(fileSystem, new Path(libPath, "runtime"),
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+
+            HadoopClientFactory.mkdirs(fileSystem, resourcesPath,
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+            HadoopClientFactory.mkdirs(fileSystem, new Path(resourcesPath, "build"),
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+            Path runTimeResourcePath = new Path(resourcesPath, "runtime");
+            HadoopClientFactory.mkdirs(fileSystem, runTimeResourcePath,
+                    HadoopClientFactory.READ_EXECUTE_PERMISSION);
+
+            fileSystem.create(new Path(runTimeResourcePath, extensionName + "-workflow.xml")).close();
+            Path dstFile = new Path(runTimeResourcePath, extensionName + "-template.xml");
+            String srcFile = extensionName + "-template.xml";
+            fileSystem.copyFromLocalFile(new Path(getAbsolutePath(srcFile)), dstFile);
+        }
+    }
+
+    private String getAbsolutePath(String fileName) {
+        return this.getClass().getResource("/" + fileName).getPath();
+    }
+
+    @AfterClass
+    public void cleanUp() throws Exception {
+        FileUtils.deleteDirectory(new File(extensionStorePath));
+    }
+}
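
For reference, the layout this harness creates under extension.store.uri looks roughly like the following (taking hdfs-mirroring as an example; the names are the ones used in extensionStoreSetup above):

    <extensionStorePath>/hdfs-mirroring/
        README
        libs/build
        libs/runtime
        resources/build
        resources/runtime/hdfs-mirroring-workflow.xml
        resources/runtime/hdfs-mirroring-template.xml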

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/java/org/apache/falcon/extensions/store/ExtensionStoreTest.java
----------------------------------------------------------------------
diff --git a/extensions/src/test/java/org/apache/falcon/extensions/store/ExtensionStoreTest.java b/extensions/src/test/java/org/apache/falcon/extensions/store/ExtensionStoreTest.java
new file mode 100644
index 0000000..3462321
--- /dev/null
+++ b/extensions/src/test/java/org/apache/falcon/extensions/store/ExtensionStoreTest.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.store;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.falcon.entity.store.StoreAccessException;
+import org.apache.falcon.extensions.mirroring.hdfs.HdfsMirroringExtension;
+import org.testng.Assert;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import java.util.Map;
+
+/**
+ *  Tests for extension store.
+ */
+public class ExtensionStoreTest extends AbstractTestExtensionStore {
+    private static Map<String, String> resourcesMap;
+
+    @BeforeClass
+    public void init() throws Exception {
+        resourcesMap = ImmutableMap.of(
+                "hdfs-mirroring-template.xml", extensionStorePath
+                        + "/hdfs-mirroring/resources/runtime/hdfs-mirroring-template.xml",
+                "hdfs-mirroring-workflow.xml", extensionStorePath
+                        + "/hdfs-mirroring/resources/runtime/hdfs-mirroring-workflow.xml"
+        );
+    }
+
+    @Test
+    public void testGetExtensionResources() throws StoreAccessException {
+        String extensionName = new HdfsMirroringExtension().getName();
+        Map<String, String> resources = store.getExtensionResources(extensionName);
+
+        for (Map.Entry<String, String> entry : resources.entrySet()) {
+            String path = resourcesMap.get(entry.getKey());
+            Assert.assertEquals(entry.getValue(), path);
+        }
+    }
+
+    @Test
+    public void testGetExtensionLibPath() throws StoreAccessException {
+        String extensionName = new HdfsMirroringExtension().getName();
+        String libPath = extensionStorePath + "/hdfs-mirroring/libs";
+        Assert.assertEquals(store.getExtensionLibPath(extensionName), libPath);
+    }
+
+}
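
Outside the tests, server-side code can consume the same store through its singleton accessor. A minimal sketch, assuming ExtensionService has already been initialized at startup (as the harness above does explicitly):

    ExtensionStore store = ExtensionStore.get();
    String extensionName = new HdfsMirroringExtension().getName();
    // Maps each resource file name to its absolute path on the store's file system
    Map<String, String> resources = store.getExtensionResources(extensionName);
    String libPath = store.getExtensionLibPath(extensionName);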

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/resources/backup-cluster-0.1.xml
----------------------------------------------------------------------
diff --git a/extensions/src/test/resources/backup-cluster-0.1.xml b/extensions/src/test/resources/backup-cluster-0.1.xml
new file mode 100644
index 0000000..c3ba6b9
--- /dev/null
+++ b/extensions/src/test/resources/backup-cluster-0.1.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<cluster colo="gs" description="" name="backupCluster" xmlns="uri:falcon:cluster:0.1"
+        >
+    <interfaces>
+        <interface type="readonly" endpoint="hftp://localhost:50010"
+                   version="0.20.2"/>
+        <interface type="write" endpoint="hdfs://localhost:8020"
+                   version="0.20.2"/>
+        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
+        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
+                   version="4.0"/>
+        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
+                   version="5.1.6"/>
+        <interface type="registry" endpoint="Hcat" version="1"/>
+    </interfaces>
+    <locations>
+        <location name="staging" path="/projects/falcon/staging"/>
+        <location name="temp" path="/tmp"/>
+        <location name="working" path="/projects/falcon/working"/>
+    </locations>
+    <properties>
+        <property name="field1" value="value1"/>
+        <property name="field2" value="value2"/>
+        <property name="hive.metastore.client.socket.timeout" value="20"/>
+    </properties>
+</cluster>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/resources/hdfs-mirroring-template.xml
----------------------------------------------------------------------
diff --git a/extensions/src/test/resources/hdfs-mirroring-template.xml b/extensions/src/test/resources/hdfs-mirroring-template.xml
new file mode 100644
index 0000000..6c35c5b
--- /dev/null
+++ b/extensions/src/test/resources/hdfs-mirroring-template.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##job.name##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##job.cluster.name##">
+            <validity end="##job.validity.end##" start="##job.validity.start##"/>
+        </cluster>
+    </clusters>
+
+    <tags/>
+
+    <parallel>1</parallel>
+    <!-- Dir replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##job.frequency##</frequency>
+    <timezone>##job.timezone##</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##job.workflow.name##" engine="##job.workflow.engine##"
+              path="##job.workflow.path##" lib="##job.workflow.lib.path##"/>
+    <retry policy="##job.retry.policy##" delay="##job.retry.delay##" attempts="3"/>
+    <notification type="##job.notification.type##" to="##job.notification.receivers##"/>
+    <ACL/>
+</process>
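
The ##...## tokens in this template are placeholders that the extension machinery substitutes from the user-supplied job properties before the process entity is submitted. A minimal sketch of that substitution convention; the helper below is illustrative only, not the actual Falcon resolver:

    private static String resolveTemplate(String template, Properties props) {
        // Replace every ##key## token with the matching property value
        for (String key : props.stringPropertyNames()) {
            template = template.replace("##" + key + "##", props.getProperty(key));
        }
        return template;
    }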

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/resources/hive-mirroring-template.xml
----------------------------------------------------------------------
diff --git a/extensions/src/test/resources/hive-mirroring-template.xml b/extensions/src/test/resources/hive-mirroring-template.xml
new file mode 100644
index 0000000..9f28991
--- /dev/null
+++ b/extensions/src/test/resources/hive-mirroring-template.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##name##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##cluster.name##">
+            <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
+        </cluster>
+    </clusters>
+
+    <tags/>
+
+    <parallel>1</parallel>
+    <!-- Replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##process.frequency##</frequency>
+    <timezone>UTC</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##workflow.name##" engine="oozie"
+              path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
+    <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
+    <notification type="##notification.type##" to="##notification.receivers##"/>
+    <ACL/>
+</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/test/resources/primary-cluster-0.1.xml
----------------------------------------------------------------------
diff --git a/extensions/src/test/resources/primary-cluster-0.1.xml b/extensions/src/test/resources/primary-cluster-0.1.xml
new file mode 100644
index 0000000..a9694c2
--- /dev/null
+++ b/extensions/src/test/resources/primary-cluster-0.1.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<cluster colo="gs" description="" name="primaryCluster" xmlns="uri:falcon:cluster:0.1"
+        >
+    <interfaces>
+        <interface type="readonly" endpoint="hftp://localhost:50010"
+                   version="0.20.2"/>
+        <interface type="write" endpoint="hdfs://localhost:8020"
+                   version="0.20.2"/>
+        <interface type="execute" endpoint="localhost:8021" version="0.20.2"/>
+        <interface type="workflow" endpoint="http://localhost:11000/oozie/"
+                   version="4.0"/>
+        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
+                   version="5.1.6"/>
+        <interface type="registry" endpoint="Hcat" version="1"/>
+    </interfaces>
+    <locations>
+        <location name="staging" path="/projects/falcon/staging"/>
+        <location name="temp" path="/tmp"/>
+        <location name="working" path="/projects/falcon/working"/>
+    </locations>
+    <properties>
+        <property name="field1" value="value1"/>
+        <property name="field2" value="value2"/>
+        <property name="hive.metastore.client.socket.timeout" value="20"/>
+    </properties>
+</cluster>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/oozie/pom.xml
----------------------------------------------------------------------
diff --git a/oozie/pom.xml b/oozie/pom.xml
index c53d33c..04b3df6 100644
--- a/oozie/pom.xml
+++ b/oozie/pom.xml
@@ -67,6 +67,12 @@
 
         <dependency>
             <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-extensions</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
             <artifactId>falcon-common</artifactId>
         </dependency>
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/oozie/src/main/java/org/apache/falcon/service/SharedLibraryHostingService.java
----------------------------------------------------------------------
diff --git a/oozie/src/main/java/org/apache/falcon/service/SharedLibraryHostingService.java b/oozie/src/main/java/org/apache/falcon/service/SharedLibraryHostingService.java
index c15d6b9..815f5f7 100644
--- a/oozie/src/main/java/org/apache/falcon/service/SharedLibraryHostingService.java
+++ b/oozie/src/main/java/org/apache/falcon/service/SharedLibraryHostingService.java
@@ -29,9 +29,14 @@ import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
 import org.apache.falcon.entity.v0.cluster.Interfacetype;
 import org.apache.falcon.hadoop.HadoopClientFactory;
 import org.apache.falcon.util.StartupProperties;
+import org.apache.falcon.extensions.store.ExtensionStore;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -75,16 +80,70 @@ public class SharedLibraryHostingService implements ConfigurationChangeListener
         }
     };
 
-    private void addLibsTo(Cluster cluster) throws FalconException {
-        FileSystem fs = null;
+    private void pushExtensionArtifactsToCluster(final Cluster cluster,
+                                                 final FileSystem clusterFs) throws FalconException {
+
+        ExtensionStore store = ExtensionStore.get();
+        if (!store.isExtensionStoreInitialized()) {
+            LOG.info("Extension store not initialized by Extension service. Make sure Extension service is added in "
+                    + "start up properties");
+            return;
+        }
+
+        Path extensionStorePath = store.getExtensionStorePath();
+        LOG.info("extensionStorePath :{}", extensionStorePath);
+        FileSystem falconFileSystem =
+                HadoopClientFactory.get().createFalconFileSystem(extensionStorePath.toUri());
+        String nameNode = StringUtils.removeEnd(falconFileSystem.getConf().get(HadoopClientFactory
+                .FS_DEFAULT_NAME_KEY), File.separator);
+
+        String clusterStorageUrl = StringUtils.removeEnd(ClusterHelper.getStorageUrl(cluster), File.separator);
+
+        // If the default FS for the Falcon server is the same as the cluster FS, skip the copy
+        if (nameNode.equalsIgnoreCase(clusterStorageUrl)) {
+            LOG.info("Cluster storage URL {} matches the Falcon default FS; skipping copy", clusterStorageUrl);
+            return;
+        }
+
         try {
-            LOG.info("Initializing FS: {} for cluster: {}", ClusterHelper.getStorageUrl(cluster), cluster.getName());
-            fs = HadoopClientFactory.get().createFalconFileSystem(ClusterHelper.getConfiguration(cluster));
-            fs.getStatus();
-        } catch (Exception e) {
-            throw new FalconException("Failed to initialize FS for cluster : " + cluster.getName(), e);
+            RemoteIterator<LocatedFileStatus> fileStatusListIterator =
+                    falconFileSystem.listFiles(extensionStorePath, true);
+
+            while (fileStatusListIterator.hasNext()) {
+                LocatedFileStatus srcfileStatus = fileStatusListIterator.next();
+                Path filePath = Path.getPathWithoutSchemeAndAuthority(srcfileStatus.getPath());
+
+                if (srcfileStatus.isDirectory()) {
+                    if (!clusterFs.exists(filePath)) {
+                        HadoopClientFactory.mkdirs(clusterFs, filePath, srcfileStatus.getPermission());
+                    }
+                } else {
+                    if (clusterFs.exists(filePath)) {
+                        FileStatus targetfstat = clusterFs.getFileStatus(filePath);
+                        if (targetfstat.getLen() == srcfileStatus.getLen()) {
+                            continue;
+                        }
+                    }
+
+                    Path parentPath = filePath.getParent();
+                    if (!clusterFs.exists(parentPath)) {
+                        FsPermission dirPerm = falconFileSystem.getFileStatus(parentPath).getPermission();
+                        HadoopClientFactory.mkdirs(clusterFs, parentPath, dirPerm);
+                    }
+
+                    FileUtil.copy(falconFileSystem, srcfileStatus, clusterFs, filePath, false, true,
+                            falconFileSystem.getConf());
+                    FileUtil.chmod(clusterFs.makeQualified(filePath).toString(),
+                            srcfileStatus.getPermission().toString());
+                }
+            }
+        } catch (IOException | InterruptedException e) {
+            throw new FalconException("Failed to copy extension artifacts to cluster" + cluster.getName(), e);
         }
+    }
 
+    private void addLibsTo(Cluster cluster, FileSystem fs) throws FalconException {
         try {
             Path lib = new Path(ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(),
                     "lib");
@@ -173,7 +232,8 @@ public class SharedLibraryHostingService implements ConfigurationChangeListener
             return;
         }
 
-        addLibsTo(cluster);
+        addLibsTo(cluster, getFilesystem(cluster));
+        pushExtensionArtifactsToCluster(cluster, getFilesystem(cluster));
     }
 
     @Override
@@ -192,7 +252,8 @@ public class SharedLibraryHostingService implements ConfigurationChangeListener
                 .equals(ClusterHelper.getInterface(newCluster, Interfacetype.WRITE).getEndpoint())
                 || !ClusterHelper.getInterface(oldCluster, Interfacetype.WORKFLOW).getEndpoint()
                 .equals(ClusterHelper.getInterface(newCluster, Interfacetype.WORKFLOW).getEndpoint())) {
-            addLibsTo(newCluster);
+            addLibsTo(newCluster, getFilesystem(newCluster));
+            pushExtensionArtifactsToCluster(newCluster, getFilesystem(newCluster));
         }
     }
 
@@ -204,4 +265,16 @@ public class SharedLibraryHostingService implements ConfigurationChangeListener
             LOG.error(e.getMessage(), e);
         }
     }
+
+    private FileSystem getFilesystem(final Cluster cluster) throws FalconException {
+        FileSystem fs;
+        try {
+            LOG.info("Initializing FS: {} for cluster: {}", ClusterHelper.getStorageUrl(cluster), cluster.getName());
+            fs = HadoopClientFactory.get().createFalconFileSystem(ClusterHelper.getConfiguration(cluster));
+            fs.getStatus();
+            return fs;
+        } catch (Exception e) {
+            throw new FalconException("Failed to initialize FS for cluster : " + cluster.getName(), e);
+        }
+    }
 }
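
Note that the copy in pushExtensionArtifactsToCluster keys off Path.getPathWithoutSchemeAndAuthority, which is what makes each artifact land at the same path on the target cluster. A small illustration of that standard Hadoop call (the paths here are made up):

    Path src = new Path("hdfs://falcon-nn:8020/apps/falcon/extensions/hdfs-mirroring/libs");
    // Yields /apps/falcon/extensions/hdfs-mirroring/libs, which clusterFs
    // then resolves against its own default file system
    Path stripped = Path.getPathWithoutSchemeAndAuthority(src);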

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index d768c6d..a62c030 100644
--- a/pom.xml
+++ b/pom.xml
@@ -301,6 +301,7 @@
                                 <exclude>**/db1.properties</exclude>
                                 <exclude>**/db1.script</exclude>
                                 <exclude>**/credential_provider.jceks</exclude>
+                                <exclude>**/*.json</exclude>
                             </excludes>
                         </configuration>
                         <executions>
@@ -429,6 +430,7 @@
         <module>retention</module>
         <module>archival</module>
         <module>rerun</module>
+        <module>extensions</module>
         <module>prism</module>
         <module>unit</module>
         <module>lifecycle</module>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/src/main/assemblies/distributed-package.xml
----------------------------------------------------------------------
diff --git a/src/main/assemblies/distributed-package.xml b/src/main/assemblies/distributed-package.xml
index 2eff638..d58f40b 100644
--- a/src/main/assemblies/distributed-package.xml
+++ b/src/main/assemblies/distributed-package.xml
@@ -115,17 +115,72 @@
             <fileMode>0644</fileMode>
             <directoryMode>0755</directoryMode>
         </fileSet>
+        <fileSet>
+            <directory>addons/extensions/hdfs-mirroring/src/main/META</directory>
+            <outputDirectory>extensions/hdfs-mirroring/META</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
 
         <fileSet>
-            <directory>../addons/recipes/hdfs-replication/src/main/resources</directory>
-            <outputDirectory>data-mirroring/hdfs-replication</outputDirectory>
-            <fileMode>0644</fileMode>
+            <directory>./</directory>
+            <outputDirectory>extensions/hdfs-mirroring/libs/build</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>./</directory>
+            <outputDirectory>extensions/hdfs-mirroring/libs/runtime</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>addons/extensions/hdfs-mirroring/src/main/resources</directory>
+            <outputDirectory>extensions/hdfs-mirroring/resources</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>addons/extensions/hive-mirroring/src/main/META</directory>
+            <outputDirectory>extensions/hive-mirroring/META</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
         </fileSet>
 
         <fileSet>
-            <directory>../addons/recipes/hive-disaster-recovery/src/main/resources</directory>
-            <outputDirectory>data-mirroring/hive-disaster-recovery</outputDirectory>
-        <fileMode>0644</fileMode>
+            <directory>./</directory>
+            <fileMode>0755</fileMode>
+            <outputDirectory>extensions/hive-mirroring/libs/build</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>./</directory>
+            <outputDirectory>extensions/hive-mirroring/libs/runtime</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>addons/extensions/hive-mirroring/src/main/resources</directory>
+            <outputDirectory>extensions/hive-mirroring/resources</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
         </fileSet>
 
         <fileSet>
@@ -181,6 +236,18 @@
             <outputDirectory>oozie/conf</outputDirectory>
             <fileMode>0644</fileMode>
         </file>
+
+        <file>
+            <source>../addons/extensions/hdfs-mirroring/README</source>
+            <outputDirectory>extensions/hdfs-mirroring</outputDirectory>
+            <fileMode>0755</fileMode>
+        </file>
+
+        <file>
+            <source>../addons/extensions/hive-mirroring/README</source>
+            <outputDirectory>extensions/hive-mirroring</outputDirectory>
+            <fileMode>0755</fileMode>
+        </file>
     </files>
 </assembly>
     

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/src/main/assemblies/standalone-package.xml
----------------------------------------------------------------------
diff --git a/src/main/assemblies/standalone-package.xml b/src/main/assemblies/standalone-package.xml
index bef19ce..eadd972 100644
--- a/src/main/assemblies/standalone-package.xml
+++ b/src/main/assemblies/standalone-package.xml
@@ -121,15 +121,71 @@
         </fileSet>
 
         <fileSet>
-            <directory>../addons/recipes/hdfs-replication/src/main/resources</directory>
-            <outputDirectory>data-mirroring/hdfs-replication</outputDirectory>
-            <fileMode>0644</fileMode>
+            <directory>addons/extensions/hdfs-mirroring/src/main/META</directory>
+            <outputDirectory>extensions/hdfs-mirroring/META</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
         </fileSet>
 
         <fileSet>
-            <directory>../addons/recipes/hive-disaster-recovery/src/main/resources</directory>
-            <outputDirectory>data-mirroring/hive-disaster-recovery</outputDirectory>
-            <fileMode>0644</fileMode>
+            <directory>./</directory>
+            <outputDirectory>extensions/hdfs-mirroring/libs/build</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>./</directory>
+            <outputDirectory>extensions/hdfs-mirroring/libs/runtime</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>addons/extensions/hdfs-mirroring/src/main/resources</directory>
+            <outputDirectory>extensions/hdfs-mirroring/resources</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>addons/extensions/hive-mirroring/src/main/META</directory>
+            <outputDirectory>extensions/hive-mirroring/META</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>./</directory>
+            <fileMode>0755</fileMode>
+            <outputDirectory>extensions/hive-mirroring/libs/build</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>./</directory>
+            <outputDirectory>extensions/hive-mirroring/libs/runtime</outputDirectory>
+            <excludes>
+                <exclude>*/**</exclude>
+            </excludes>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
+        </fileSet>
+
+        <fileSet>
+            <directory>addons/extensions/hive-mirroring/src/main/resources</directory>
+            <outputDirectory>extensions/hive-mirroring/resources</outputDirectory>
+            <fileMode>0755</fileMode>
+            <directoryMode>0755</directoryMode>
         </fileSet>
     </fileSets>
 
@@ -165,5 +221,17 @@
             <destName>falcon.war</destName>
             <fileMode>0644</fileMode>
         </file>
+
+        <file>
+            <source>../addons/extensions/hdfs-mirroring/README</source>
+            <outputDirectory>extensions/hdfs-mirroring</outputDirectory>
+            <fileMode>0755</fileMode>
+        </file>
+
+        <file>
+            <source>../addons/extensions/hive-mirroring/README</source>
+            <outputDirectory>extensions/hive-mirroring</outputDirectory>
+            <fileMode>0755</fileMode>
+        </file>
     </files>
 </assembly>


[2/3] falcon git commit: FALCON-1107 Move trusted extensions processing to server side

Posted by ba...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties
deleted file mode 100644
index 4642835..0000000
--- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##### NOTE: This is a TEMPLATE file which can be copied and edited
-
-##### Recipe properties
-##### Unique recipe job name
-falcon.recipe.name=sales-monthly
-
-##### Workflow properties
-falcon.recipe.workflow.name=hdfs-dr-workflow
-# Provide Wf absolute path. This can be HDFS or local FS path. If WF is on local FS it will be copied to HDFS
-falcon.recipe.workflow.path=/apps/data-mirroring/workflows/hdfs-replication-workflow.xml
-# Provide Wf lib absolute path. This can be HDFS or local FS path. If libs are on local FS it will be copied to HDFS
-#falcon.recipe.workflow.lib.path=/recipes/hdfs-replication/lib
-
-##### Cluster properties
-# Cluster where job should run
-falcon.recipe.cluster.name=primaryCluster
-# Change the cluster hdfs write end point here. This is mandatory.
-falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://240.0.0.10:8020
-# Change the cluster validity start time here
-falcon.recipe.cluster.validity.start=2015-03-13T00:00Z
-# Change the cluster validity end time here
-falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
-
-##### Scheduling properties
-# Change the recipe frequency here. Valid frequency type are minutes, hours, days, months
-falcon.recipe.process.frequency=minutes(5)
-
-##### Tag properties - An optional list of comma separated tags, Key Value Pairs, separated by comma
-##### Uncomment to add tags
-#falcon.recipe.tags=
-
-##### Retry policy properties
-
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-
-##### ACL properties - Uncomment and change ACL if authorization is enabled
-
-falcon.recipe.acl.owner=ambari-qa
-falcon.recipe.acl.group=users
-falcon.recipe.acl.permission=0x755
-falcon.recipe.nn.principal=nn/_HOST@EXAMPLE.COM
-
-##### Custom Job properties
-
-# Specify multiple comma separated source directories
-drSourceDir=/user/hrt_qa/dr/test/primaryCluster/input
-drSourceClusterFS=hdfs://240.0.0.10:8020
-drTargetDir=/user/hrt_qa/dr/test/backupCluster/input
-drTargetClusterFS=hdfs://240.0.0.11:8020
-
-# Change it to specify the maximum number of mappers for DistCP
-distcpMaxMaps=1
-# Change it to specify the bandwidth in MB for each mapper in DistCP
-distcpMapBandwidth=100
-
-##### Email Notification for Falcon instance completion
-falcon.recipe.notification.type=email
-falcon.recipe.notification.receivers=NA
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/README.txt
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/README.txt b/addons/recipes/hive-disaster-recovery/README.txt
deleted file mode 100644
index ab393b1..0000000
--- a/addons/recipes/hive-disaster-recovery/README.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Hive Metastore Disaster Recovery Recipe
-
-Overview
-This recipe implements replicating hive metadata and data from one
-Hadoop cluster to another Hadoop cluster.
-This piggy backs on replication solution in Falcon which uses the DistCp tool.
-
-Use Case
-*
-*
-
-Limitations
-*
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Hive Metastore Disaster Recovery Recipe
-
-Overview
-This recipe implements replicating hive metadata and data from one
-Hadoop cluster to another Hadoop cluster.
-This piggy backs on replication solution in Falcon which uses the DistCp tool.
-
-Use Case
-*
-*
-
-Limitations
-*

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/pom.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/pom.xml b/addons/recipes/hive-disaster-recovery/pom.xml
deleted file mode 100644
index 0f782d2..0000000
--- a/addons/recipes/hive-disaster-recovery/pom.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.apache.falcon.recipes</groupId>
-    <artifactId>falcon-hive-replication-recipe</artifactId>
-    <version>0.10-SNAPSHOT</version>
-    <description>Apache Falcon Hive Disaster Recovery Recipe</description>
-    <name>Apache Falcon Sample Hive Disaster Recovery Recipe</name>
-    <packaging>jar</packaging>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml
deleted file mode 100644
index f0de091..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<process name="##name##" xmlns="uri:falcon:process:0.1">
-    <clusters>
-        <!--  source  -->
-        <cluster name="##cluster.name##">
-            <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
-        </cluster>
-    </clusters>
-
-    <tags>_falcon_mirroring_type=HIVE</tags>
-
-    <parallel>1</parallel>
-    <!-- Replication needs to run only once to catch up -->
-    <order>LAST_ONLY</order>
-    <frequency>##process.frequency##</frequency>
-    <timezone>UTC</timezone>
-
-    <properties>
-        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
-    </properties>
-
-    <workflow name="##workflow.name##" engine="oozie"
-              path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
-    <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
-    <notification type="##notification.type##" to="##notification.receivers##"/>
-    <ACL/>
-</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml
deleted file mode 100644
index 2d6b8be..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml
+++ /dev/null
@@ -1,363 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
-    <credentials>
-        <credential name='hive_src_credentials' type='hcat'>
-            <property>
-                <name>hcat.metastore.uri</name>
-                <value>${sourceMetastoreUri}</value>
-            </property>
-            <property>
-                <name>hcat.metastore.principal</name>
-                <value>${sourceHiveMetastoreKerberosPrincipal}</value>
-            </property>
-        </credential>
-        <credential name='hive_tgt_credentials' type='hcat'>
-            <property>
-                <name>hcat.metastore.uri</name>
-                <value>${targetMetastoreUri}</value>
-            </property>
-            <property>
-                <name>hcat.metastore.principal</name>
-                <value>${targetHiveMetastoreKerberosPrincipal}</value>
-            </property>
-        </credential>
-        <credential name="hive2_src_credentials" type="hive2">
-            <property>
-                <name>hive2.server.principal</name>
-                <value>${sourceHive2KerberosPrincipal}</value>
-            </property>
-            <property>
-                <name>hive2.jdbc.url</name>
-                <value>jdbc:${sourceHiveServer2Uri}/${sourceDatabase}</value>
-            </property>
-        </credential>
-        <credential name="hive2_tgt_credentials" type="hive2">
-            <property>
-                <name>hive2.server.principal</name>
-                <value>${targetHive2KerberosPrincipal}</value>
-            </property>
-            <property>
-                <name>hive2.jdbc.url</name>
-                <value>jdbc:${targetHiveServer2Uri}/${sourceDatabase}</value>
-            </property>
-        </credential>
-    </credentials>
-    <start to='last-event'/>
-    <action name="last-event" cred="hive_tgt_credentials">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp,hive,hive2,hcatalog</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
-                    <value>${sourceNN},${targetNN}</value>
-                </property>
-                <property>
-                    <name>mapreduce.job.hdfs-servers</name>
-                    <value>${sourceNN},${targetNN}</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-falconLibPath</arg>
-            <arg>${wf:conf("falcon.libpath")}</arg>
-            <arg>-sourceCluster</arg>
-            <arg>${sourceCluster}</arg>
-            <arg>-sourceMetastoreUri</arg>
-            <arg>${sourceMetastoreUri}</arg>
-            <arg>-sourceHiveServer2Uri</arg>
-            <arg>${sourceHiveServer2Uri}</arg>
-            <arg>-sourceDatabase</arg>
-            <arg>${sourceDatabase}</arg>
-            <arg>-sourceTable</arg>
-            <arg>${sourceTable}</arg>
-            <arg>-sourceStagingPath</arg>
-            <arg>${sourceStagingPath}</arg>
-            <arg>-sourceNN</arg>
-            <arg>${sourceNN}</arg>
-            <arg>-sourceNNKerberosPrincipal</arg>
-            <arg>${sourceNNKerberosPrincipal}</arg>
-            <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
-            <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
-            <arg>-sourceHive2KerberosPrincipal</arg>
-            <arg>${sourceHive2KerberosPrincipal}</arg>
-            <arg>-targetCluster</arg>
-            <arg>${targetCluster}</arg>
-            <arg>-targetMetastoreUri</arg>
-            <arg>${targetMetastoreUri}</arg>
-            <arg>-targetHiveServer2Uri</arg>
-            <arg>${targetHiveServer2Uri}</arg>
-            <arg>-targetStagingPath</arg>
-            <arg>${targetStagingPath}</arg>
-            <arg>-targetNN</arg>
-            <arg>${targetNN}</arg>
-            <arg>-targetNNKerberosPrincipal</arg>
-            <arg>${targetNNKerberosPrincipal}</arg>
-            <arg>-targetHiveMetastoreKerberosPrincipal</arg>
-            <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
-            <arg>-targetHive2KerberosPrincipal</arg>
-            <arg>${targetHive2KerberosPrincipal}</arg>
-            <arg>-maxEvents</arg>
-            <arg>${maxEvents}</arg>
-            <arg>-clusterForJobRun</arg>
-            <arg>${clusterForJobRun}</arg>
-            <arg>-clusterForJobRunWriteEP</arg>
-            <arg>${clusterForJobRunWriteEP}</arg>
-            <arg>-clusterForJobNNKerberosPrincipal</arg>
-            <arg>${clusterForJobNNKerberosPrincipal}</arg>
-            <arg>-tdeEncryptionEnabled</arg>
-            <arg>${tdeEncryptionEnabled}</arg>
-            <arg>-drJobName</arg>
-            <arg>${drJobName}-${nominalTime}</arg>
-            <arg>-executionStage</arg>
-            <arg>lastevents</arg>
-        </java>
-        <ok to="export-dr-replication"/>
-        <error to="fail"/>
-    </action>
-    <!-- Export Replication action -->
-    <action name="export-dr-replication" cred="hive_src_credentials,hive2_src_credentials">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp,hive,hive2,hcatalog</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
-                    <value>${sourceNN},${targetNN}</value>
-                </property>
-                <property>
-                    <name>mapreduce.job.hdfs-servers</name>
-                    <value>${sourceNN},${targetNN}</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-falconLibPath</arg>
-            <arg>${wf:conf("falcon.libpath")}</arg>
-            <arg>-replicationMaxMaps</arg>
-            <arg>${replicationMaxMaps}</arg>
-            <arg>-distcpMaxMaps</arg>
-            <arg>${distcpMaxMaps}</arg>
-            <arg>-sourceCluster</arg>
-            <arg>${sourceCluster}</arg>
-            <arg>-sourceMetastoreUri</arg>
-            <arg>${sourceMetastoreUri}</arg>
-            <arg>-sourceHiveServer2Uri</arg>
-            <arg>${sourceHiveServer2Uri}</arg>
-            <arg>-sourceDatabase</arg>
-            <arg>${sourceDatabase}</arg>
-            <arg>-sourceTable</arg>
-            <arg>${sourceTable}</arg>
-            <arg>-sourceStagingPath</arg>
-            <arg>${sourceStagingPath}</arg>
-            <arg>-sourceNN</arg>
-            <arg>${sourceNN}</arg>
-            <arg>-sourceNNKerberosPrincipal</arg>
-            <arg>${sourceNNKerberosPrincipal}</arg>
-            <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
-            <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
-            <arg>-sourceHive2KerberosPrincipal</arg>
-            <arg>${sourceHive2KerberosPrincipal}</arg>
-            <arg>-targetCluster</arg>
-            <arg>${targetCluster}</arg>
-            <arg>-targetMetastoreUri</arg>
-            <arg>${targetMetastoreUri}</arg>
-            <arg>-targetHiveServer2Uri</arg>
-            <arg>${targetHiveServer2Uri}</arg>
-            <arg>-targetStagingPath</arg>
-            <arg>${targetStagingPath}</arg>
-            <arg>-targetNN</arg>
-            <arg>${targetNN}</arg>
-            <arg>-targetNNKerberosPrincipal</arg>
-            <arg>${targetNNKerberosPrincipal}</arg>
-            <arg>-targetHiveMetastoreKerberosPrincipal</arg>
-            <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
-            <arg>-targetHive2KerberosPrincipal</arg>
-            <arg>${targetHive2KerberosPrincipal}</arg>
-            <arg>-maxEvents</arg>
-            <arg>${maxEvents}</arg>
-            <arg>-distcpMapBandwidth</arg>
-            <arg>${distcpMapBandwidth}</arg>
-            <arg>-clusterForJobRun</arg>
-            <arg>${clusterForJobRun}</arg>
-            <arg>-clusterForJobRunWriteEP</arg>
-            <arg>${clusterForJobRunWriteEP}</arg>
-            <arg>-clusterForJobNNKerberosPrincipal</arg>
-            <arg>${clusterForJobNNKerberosPrincipal}</arg>
-            <arg>-tdeEncryptionEnabled</arg>
-            <arg>${tdeEncryptionEnabled}</arg>
-            <arg>-drJobName</arg>
-            <arg>${drJobName}-${nominalTime}</arg>
-            <arg>-executionStage</arg>
-            <arg>export</arg>
-            <arg>-counterLogDir</arg>
-            <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}/</arg>
-        </java>
-        <ok to="import-dr-replication"/>
-        <error to="fail"/>
-    </action>
-    <!-- Import Replication action -->
-    <action name="import-dr-replication" cred="hive_tgt_credentials,hive2_tgt_credentials">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp,hive,hive2,hcatalog</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
-                    <value>${sourceNN},${targetNN}</value>
-                </property>
-                <property>
-                    <name>mapreduce.job.hdfs-servers</name>
-                    <value>${sourceNN},${targetNN}</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-falconLibPath</arg>
-            <arg>${wf:conf("falcon.libpath")}</arg>
-            <arg>-replicationMaxMaps</arg>
-            <arg>${replicationMaxMaps}</arg>
-            <arg>-distcpMaxMaps</arg>
-            <arg>${distcpMaxMaps}</arg>
-            <arg>-sourceCluster</arg>
-            <arg>${sourceCluster}</arg>
-            <arg>-sourceMetastoreUri</arg>
-            <arg>${sourceMetastoreUri}</arg>
-            <arg>-sourceHiveServer2Uri</arg>
-            <arg>${sourceHiveServer2Uri}</arg>
-            <arg>-sourceDatabase</arg>
-            <arg>${sourceDatabase}</arg>
-            <arg>-sourceTable</arg>
-            <arg>${sourceTable}</arg>
-            <arg>-sourceStagingPath</arg>
-            <arg>${sourceStagingPath}</arg>
-            <arg>-sourceNN</arg>
-            <arg>${sourceNN}</arg>
-            <arg>-sourceNNKerberosPrincipal</arg>
-            <arg>${sourceNNKerberosPrincipal}</arg>
-            <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
-            <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
-            <arg>-sourceHive2KerberosPrincipal</arg>
-            <arg>${sourceHive2KerberosPrincipal}</arg>
-            <arg>-targetCluster</arg>
-            <arg>${targetCluster}</arg>
-            <arg>-targetMetastoreUri</arg>
-            <arg>${targetMetastoreUri}</arg>
-            <arg>-targetHiveServer2Uri</arg>
-            <arg>${targetHiveServer2Uri}</arg>
-            <arg>-targetStagingPath</arg>
-            <arg>${targetStagingPath}</arg>
-            <arg>-targetNN</arg>
-            <arg>${targetNN}</arg>
-            <arg>-targetNNKerberosPrincipal</arg>
-            <arg>${targetNNKerberosPrincipal}</arg>
-            <arg>-targetHiveMetastoreKerberosPrincipal</arg>
-            <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
-            <arg>-targetHive2KerberosPrincipal</arg>
-            <arg>${targetHive2KerberosPrincipal}</arg>
-            <arg>-maxEvents</arg>
-            <arg>${maxEvents}</arg>
-            <arg>-distcpMapBandwidth</arg>
-            <arg>${distcpMapBandwidth}</arg>
-            <arg>-clusterForJobRun</arg>
-            <arg>${clusterForJobRun}</arg>
-            <arg>-clusterForJobRunWriteEP</arg>
-            <arg>${clusterForJobRunWriteEP}</arg>
-            <arg>-clusterForJobNNKerberosPrincipal</arg>
-            <arg>${clusterForJobNNKerberosPrincipal}</arg>
-            <arg>-tdeEncryptionEnabled</arg>
-            <arg>${tdeEncryptionEnabled}</arg>
-            <arg>-drJobName</arg>
-            <arg>${drJobName}-${nominalTime}</arg>
-            <arg>-executionStage</arg>
-            <arg>import</arg>
-        </java>
-        <ok to="end"/>
-        <error to="fail"/>
-    </action>
-    <kill name="fail">
-        <message>
-            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
-        </message>
-    </kill>
-    <end name="end"/>
-</workflow-app>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties
deleted file mode 100644
index 331d57e..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##### NOTE: This is a TEMPLATE file which can be copied and edited
-
-##### Recipe properties
-falcon.recipe.name=hive-disaster-recovery
-
-
-##### Workflow properties
-falcon.recipe.workflow.name=hive-dr-workflow
-# Provide the absolute path of the workflow. This can be an HDFS or local FS path. If the workflow is on the local FS it will be copied to HDFS
-falcon.recipe.workflow.path=/recipes/hive-replication/hive-disaster-recovery-secure-workflow.xml
-
-##### Cluster properties
-
-# Change the cluster name where replication job should run here
-falcon.recipe.cluster.name=backupCluster
-# Change the cluster hdfs write end point here. This is mandatory.
-falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://localhost:8020
-# Change the cluster validity start time here
-falcon.recipe.cluster.validity.start=2014-10-01T00:00Z
-# Change the cluster validity end time here
-falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
-# Change the cluster namenode kerberos principal. This is mandatory on secure clusters.
-falcon.recipe.nn.principal=nn/_HOST@EXAMPLE.COM
-
-##### Scheduling properties
-
-# Change the process frequency here. Valid frequency types are minutes, hours, days, months
-falcon.recipe.process.frequency=minutes(60)
-
-##### Retry policy properties
-
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-
-##### Tag properties - An optional comma separated list of tags as key=value pairs
-##### Uncomment to add tags
-#falcon.recipe.tags=owner=landing,pipeline=adtech
-
-##### ACL properties - Uncomment and change ACL if authorization is enabled
-
-#falcon.recipe.acl.owner=testuser
-#falcon.recipe.acl.group=group
-#falcon.recipe.acl.permission=0x755
-
-##### Custom Job properties
-
-##### Source Cluster DR properties
-sourceCluster=primaryCluster
-sourceMetastoreUri=thrift://localhost:9083
-sourceHiveServer2Uri=hive2://localhost:10000
-# For DB level replication, to replicate multiple databases specify a comma separated list of databases
-sourceDatabase=default
-# For DB level replication specify * for sourceTable.
-# For table level replication, to replicate multiple tables specify a comma separated list of tables
-sourceTable=testtable_dr
-## Specify the staging dir on the source without the fully qualified domain name.
-sourceStagingPath=/apps/hive/tools/dr
-sourceNN=hdfs://localhost:8020
-# Specify the Kerberos principal required to access the source namenode and Hive servers; optional on a non-secure cluster.
-sourceNNKerberosPrincipal=nn/_HOST@EXAMPLE.COM
-sourceHiveMetastoreKerberosPrincipal=hive/_HOST@EXAMPLE.COM
-sourceHive2KerberosPrincipal=hive/_HOST@EXAMPLE.COM
-
-##### Target Cluster DR properties
-targetCluster=backupCluster
-targetMetastoreUri=thrift://localhost:9083
-targetHiveServer2Uri=hive2://localhost:10000
-## Specify the staging dir on the target without the fully qualified domain name.
-targetStagingPath=/apps/hive/tools/dr
-targetNN=hdfs://localhost:8020
-# Specify the Kerberos principal required to access the target namenode and Hive servers; optional on a non-secure cluster.
-targetNNKerberosPrincipal=nn/_HOST@EXAMPLE.COM
-targetHiveMetastoreKerberosPrincipal=hive/_HOST@EXAMPLE.COM
-targetHive2KerberosPrincipal=hive/_HOST@EXAMPLE.COM
-
-# Caps the maximum number of events processed each time the job runs. Set it based on your bandwidth limit.
-# Setting it to -1 will process all the events but can hog the bandwidth. Use it judiciously!
-maxEvents=-1
-# Change it to specify the maximum number of mappers for replication
-replicationMaxMaps=5
-# Change it to specify the maximum number of mappers for DistCP
-distcpMaxMaps=1
-# Change it to specify the bandwidth in MB/s for each mapper in DistCP
-distcpMapBandwidth=100
-# Set this flag to true if TDE encryption is enabled on source and target
-tdeEncryptionEnabled=false
-
-##### Email Notification for Falcon instance completion
-falcon.recipe.notification.type=email
-falcon.recipe.notification.receivers=NA
\ No newline at end of file
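As an aside on how the DB level vs. table level switch in the custom job properties above plays out, here is a minimal standalone sketch using plain java.util.Properties (this is not Falcon's actual recipe parser, and the file name is just an example):

import java.io.FileReader;
import java.util.Properties;

public class RecipePropsCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        try (FileReader reader = new FileReader("hive-disaster-recovery-secure.properties")) {
            props.load(reader);
        }
        // Per the template comments: "*" in sourceTable selects DB level replication,
        // anything else is a comma separated list of tables.
        String sourceTable = props.getProperty("sourceTable", "").trim();
        if ("*".equals(sourceTable)) {
            System.out.println("DB level replication of: " + props.getProperty("sourceDatabase"));
        } else {
            System.out.println("Table level replication of: " + sourceTable);
        }
    }
}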

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml
deleted file mode 100644
index f0de091..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<process name="##name##" xmlns="uri:falcon:process:0.1">
-    <clusters>
-        <!--  source  -->
-        <cluster name="##cluster.name##">
-            <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
-        </cluster>
-    </clusters>
-
-    <tags>_falcon_mirroring_type=HIVE</tags>
-
-    <parallel>1</parallel>
-    <!-- Replication needs to run only once to catch up -->
-    <order>LAST_ONLY</order>
-    <frequency>##process.frequency##</frequency>
-    <timezone>UTC</timezone>
-
-    <properties>
-        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
-    </properties>
-
-    <workflow name="##workflow.name##" engine="oozie"
-              path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
-    <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
-    <notification type="##notification.type##" to="##notification.receivers##"/>
-    <ACL/>
-</process>
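The ##name##, ##cluster.name## and similar tokens in the template above are substituted from the recipe properties before submission. A minimal sketch of such a substitution (the helper below is hypothetical; Falcon's real resolution, including the falcon.recipe.* key mapping, lives in the recipe/extension code):

import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class TemplateFiller {
    // Matches ##key## tokens such as ##name## or ##cluster.validity.start##.
    private static final Pattern TOKEN = Pattern.compile("##([^#]+)##");

    public static String fill(String template, Properties props) {
        Matcher matcher = TOKEN.matcher(template);
        StringBuffer result = new StringBuffer();
        while (matcher.find()) {
            // Leave the token untouched if no value was supplied.
            String value = props.getProperty(matcher.group(1), matcher.group(0));
            matcher.appendReplacement(result, Matcher.quoteReplacement(value));
        }
        matcher.appendTail(result);
        return result.toString();
    }
}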

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml
deleted file mode 100644
index 296e049..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml
+++ /dev/null
@@ -1,249 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
-    <start to='last-event'/>
-    <action name="last-event">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp,hive,hive2,hcatalog</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-falconLibPath</arg>
-            <arg>${wf:conf("falcon.libpath")}</arg>
-            <arg>-sourceCluster</arg>
-            <arg>${sourceCluster}</arg>
-            <arg>-sourceMetastoreUri</arg>
-            <arg>${sourceMetastoreUri}</arg>
-            <arg>-sourceHiveServer2Uri</arg>
-            <arg>${sourceHiveServer2Uri}</arg>
-            <arg>-sourceDatabase</arg>
-            <arg>${sourceDatabase}</arg>
-            <arg>-sourceTable</arg>
-            <arg>${sourceTable}</arg>
-            <arg>-sourceStagingPath</arg>
-            <arg>${sourceStagingPath}</arg>
-            <arg>-sourceNN</arg>
-            <arg>${sourceNN}</arg>
-            <arg>-targetCluster</arg>
-            <arg>${targetCluster}</arg>
-            <arg>-targetMetastoreUri</arg>
-            <arg>${targetMetastoreUri}</arg>
-            <arg>-targetHiveServer2Uri</arg>
-            <arg>${targetHiveServer2Uri}</arg>
-            <arg>-targetStagingPath</arg>
-            <arg>${targetStagingPath}</arg>
-            <arg>-targetNN</arg>
-            <arg>${targetNN}</arg>
-            <arg>-maxEvents</arg>
-            <arg>${maxEvents}</arg>
-            <arg>-clusterForJobRun</arg>
-            <arg>${clusterForJobRun}</arg>
-            <arg>-clusterForJobRunWriteEP</arg>
-            <arg>${clusterForJobRunWriteEP}</arg>
-            <arg>-drJobName</arg>
-            <arg>${drJobName}-${nominalTime}</arg>
-            <arg>-executionStage</arg>
-            <arg>lastevents</arg>
-        </java>
-        <ok to="export-dr-replication"/>
-        <error to="fail"/>
-    </action>
-    <!-- Export Replication action -->
-    <action name="export-dr-replication">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp,hive,hive2,hcatalog</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-falconLibPath</arg>
-            <arg>${wf:conf("falcon.libpath")}</arg>
-            <arg>-replicationMaxMaps</arg>
-            <arg>${replicationMaxMaps}</arg>
-            <arg>-distcpMaxMaps</arg>
-            <arg>${distcpMaxMaps}</arg>
-            <arg>-sourceCluster</arg>
-            <arg>${sourceCluster}</arg>
-            <arg>-sourceMetastoreUri</arg>
-            <arg>${sourceMetastoreUri}</arg>
-            <arg>-sourceHiveServer2Uri</arg>
-            <arg>${sourceHiveServer2Uri}</arg>
-            <arg>-sourceDatabase</arg>
-            <arg>${sourceDatabase}</arg>
-            <arg>-sourceTable</arg>
-            <arg>${sourceTable}</arg>
-            <arg>-sourceStagingPath</arg>
-            <arg>${sourceStagingPath}</arg>
-            <arg>-sourceNN</arg>
-            <arg>${sourceNN}</arg>
-            <arg>-targetCluster</arg>
-            <arg>${targetCluster}</arg>
-            <arg>-targetMetastoreUri</arg>
-            <arg>${targetMetastoreUri}</arg>
-            <arg>-targetHiveServer2Uri</arg>
-            <arg>${targetHiveServer2Uri}</arg>
-            <arg>-targetStagingPath</arg>
-            <arg>${targetStagingPath}</arg>
-            <arg>-targetNN</arg>
-            <arg>${targetNN}</arg>
-            <arg>-maxEvents</arg>
-            <arg>${maxEvents}</arg>
-            <arg>-distcpMapBandwidth</arg>
-            <arg>${distcpMapBandwidth}</arg>
-            <arg>-clusterForJobRun</arg>
-            <arg>${clusterForJobRun}</arg>
-            <arg>-clusterForJobRunWriteEP</arg>
-            <arg>${clusterForJobRunWriteEP}</arg>
-            <arg>-drJobName</arg>
-            <arg>${drJobName}-${nominalTime}</arg>
-            <arg>-executionStage</arg>
-            <arg>export</arg>
-            <arg>-counterLogDir</arg>
-            <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}/</arg>
-        </java>
-        <ok to="import-dr-replication"/>
-        <error to="fail"/>
-    </action>
-    <!-- Import Replication action -->
-    <action name="import-dr-replication">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp,hive,hive2,hcatalog</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-falconLibPath</arg>
-            <arg>${wf:conf("falcon.libpath")}</arg>
-            <arg>-replicationMaxMaps</arg>
-            <arg>${replicationMaxMaps}</arg>
-            <arg>-distcpMaxMaps</arg>
-            <arg>${distcpMaxMaps}</arg>
-            <arg>-sourceCluster</arg>
-            <arg>${sourceCluster}</arg>
-            <arg>-sourceMetastoreUri</arg>
-            <arg>${sourceMetastoreUri}</arg>
-            <arg>-sourceHiveServer2Uri</arg>
-            <arg>${sourceHiveServer2Uri}</arg>
-            <arg>-sourceDatabase</arg>
-            <arg>${sourceDatabase}</arg>
-            <arg>-sourceTable</arg>
-            <arg>${sourceTable}</arg>
-            <arg>-sourceStagingPath</arg>
-            <arg>${sourceStagingPath}</arg>
-            <arg>-sourceNN</arg>
-            <arg>${sourceNN}</arg>
-            <arg>-targetCluster</arg>
-            <arg>${targetCluster}</arg>
-            <arg>-targetMetastoreUri</arg>
-            <arg>${targetMetastoreUri}</arg>
-            <arg>-targetHiveServer2Uri</arg>
-            <arg>${targetHiveServer2Uri}</arg>
-            <arg>-targetStagingPath</arg>
-            <arg>${targetStagingPath}</arg>
-            <arg>-targetNN</arg>
-            <arg>${targetNN}</arg>
-            <arg>-maxEvents</arg>
-            <arg>${maxEvents}</arg>
-            <arg>-distcpMapBandwidth</arg>
-            <arg>${distcpMapBandwidth}</arg>
-            <arg>-clusterForJobRun</arg>
-            <arg>${clusterForJobRun}</arg>
-            <arg>-clusterForJobRunWriteEP</arg>
-            <arg>${clusterForJobRunWriteEP}</arg>
-            <arg>-drJobName</arg>
-            <arg>${drJobName}-${nominalTime}</arg>
-            <arg>-executionStage</arg>
-            <arg>import</arg>
-        </java>
-        <ok to="end"/>
-        <error to="fail"/>
-    </action>
-    <kill name="fail">
-        <message>
-            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
-        </message>
-    </kill>
-    <end name="end"/>
-</workflow-app>
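The three Java actions in the workflow above are the same HiveDRTool invocation run with a different -executionStage each time: lastevents, then export, then import. A sketch of that invocation shape, with only a few of the flags shown and placeholder values (the jar name below is a placeholder, not the real artifact name):

public class HiveDrStages {
    public static void main(String[] args) {
        String[] stages = {"lastevents", "export", "import"};
        for (String stage : stages) {
            // Each Oozie <java> action passes the full <arg> list from the workflow.
            System.out.printf("hadoop jar falcon-hive-dr.jar org.apache.falcon.hive.HiveDRTool"
                    + " -sourceCluster primaryCluster -targetCluster backupCluster"
                    + " -drJobName hive-dr-2016-04-12 -executionStage %s%n", stage);
        }
    }
}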

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties
deleted file mode 100644
index b14ec7c..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##### NOTE: This is a TEMPLATE file which can be copied and edited
-
-##### Recipe properties
-falcon.recipe.name=hive-disaster-recovery
-
-
-##### Workflow properties
-falcon.recipe.workflow.name=hive-dr-workflow
-# Provide the absolute path of the workflow. This can be an HDFS or local FS path. If the workflow is on the local FS it will be copied to HDFS
-falcon.recipe.workflow.path=/recipes/hive-replication/hive-disaster-recovery-workflow.xml
-
-##### Cluster properties
-
-# Change the cluster name where replication job should run here
-falcon.recipe.cluster.name=backupCluster
-# Change the cluster hdfs write end point here. This is mandatory.
-falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://localhost:8020
-# Change the cluster validity start time here
-falcon.recipe.cluster.validity.start=2014-10-01T00:00Z
-# Change the cluster validity end time here
-falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
-
-##### Scheduling properties
-
-# Change the process frequency here. Valid frequency types are minutes, hours, days, months
-falcon.recipe.process.frequency=minutes(60)
-
-##### Retry policy properties
-
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-
-##### Tag properties - An optional comma separated list of tags as key=value pairs
-##### Uncomment to add tags
-#falcon.recipe.tags=owner=landing,pipeline=adtech
-
-##### ACL properties - Uncomment and change ACL if authorization is enabled
-
-#falcon.recipe.acl.owner=testuser
-#falcon.recipe.acl.group=group
-#falcon.recipe.acl.permission=0x755
-
-##### Custom Job properties
-
-##### Source Cluster DR properties
-sourceCluster=primaryCluster
-sourceMetastoreUri=thrift://localhost:9083
-sourceHiveServer2Uri=hive2://localhost:10000
-# For DB level replication, to replicate multiple databases specify a comma separated list of databases
-sourceDatabase=default
-# For DB level replication specify * for sourceTable.
-# For table level replication, to replicate multiple tables specify a comma separated list of tables
-sourceTable=testtable_dr
-## Specify the staging dir on the source without the fully qualified domain name.
-sourceStagingPath=/apps/hive/tools/dr
-sourceNN=hdfs://localhost:8020
-
-##### Target Cluster DR properties
-targetCluster=backupCluster
-targetMetastoreUri=thrift://localhost:9083
-targetHiveServer2Uri=hive2://localhost:10000
-## Specify the staging dir on the target without the fully qualified domain name.
-targetStagingPath=/apps/hive/tools/dr
-targetNN=hdfs://localhost:8020
-
-# Caps the maximum number of events processed each time the job runs. Set it based on your bandwidth limit.
-# Setting it to -1 will process all the events but can hog the bandwidth. Use it judiciously!
-maxEvents=-1
-# Change it to specify the maximum number of mappers for replication
-replicationMaxMaps=5
-# Change it to specify the maximum number of mappers for DistCP
-distcpMaxMaps=1
-# Change it to specify the bandwidth in MB/s for each mapper in DistCP
-distcpMapBandwidth=100
-
-##### Email Notification for Falcon instance completion
-falcon.recipe.notification.type=email
-falcon.recipe.notification.receivers=NA
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java b/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
index 41d50df..30a0118 100644
--- a/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
+++ b/common/src/main/java/org/apache/falcon/catalog/AbstractCatalogService.java
@@ -40,6 +40,18 @@ public abstract class AbstractCatalogService {
     public abstract boolean isAlive(Configuration conf, String catalogUrl) throws FalconException;
 
     /**
+     * This method checks if the given db exists in the catalog.
+     *
+     * @param conf  conf
+     * @param catalogUrl url for the catalog service
+     * @param databaseName name of the database to check
+     * @return true if the db exists, false otherwise
+     * @throws FalconException exception
+     */
+    public abstract boolean dbExists(Configuration conf, String catalogUrl,
+                                     String databaseName) throws FalconException;
+
+    /**
      * This method checks if the given table exists in the catalog.
      *
      * @param conf  conf
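For illustration, a hypothetical caller could guard a table lookup with the new database check, written against the abstract API only:

import org.apache.falcon.FalconException;
import org.apache.falcon.catalog.AbstractCatalogService;
import org.apache.hadoop.conf.Configuration;

public final class CatalogChecks {
    // Returns true only when both the database and the table are present.
    public static boolean tableReachable(AbstractCatalogService catalog, Configuration conf,
                                         String catalogUrl, String database, String table)
            throws FalconException {
        return catalog.dbExists(conf, catalogUrl, database)
                && catalog.tableExists(conf, catalogUrl, database, table);
    }
}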

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java b/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
index 872f91f..4ad62d7 100644
--- a/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
+++ b/common/src/main/java/org/apache/falcon/catalog/HiveCatalogService.java
@@ -208,6 +208,22 @@ public class HiveCatalogService extends AbstractCatalogService {
     }
 
     @Override
+    public boolean dbExists(Configuration conf, final String catalogUrl,
+                            final String databaseName) throws FalconException {
+        LOG.info("Checking if the db exists: {}", databaseName);
+
+        try {
+            HiveMetaStoreClient client = createProxiedClient(conf, catalogUrl);
+            Database db = client.getDatabase(databaseName);
+            return db != null;
+        } catch (NoSuchObjectException e) {
+            return false;
+        } catch (Exception e) {
+            throw new FalconException("Exception checking if the db exists: " + e.getMessage(), e);
+        }
+    }
+
+    @Override
     public boolean tableExists(Configuration conf, final String catalogUrl, final String database,
                                final String tableName) throws FalconException {
         LOG.info("Checking if the table exists: {}", tableName);

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/common/src/main/resources/startup.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/startup.properties b/common/src/main/resources/startup.properties
index 87a74bf..dd51963 100644
--- a/common/src/main/resources/startup.properties
+++ b/common/src/main/resources/startup.properties
@@ -100,6 +100,8 @@ debug.libext.feed.retention.paths=${falcon.libext}
 debug.libext.feed.replication.paths=${falcon.libext}
 debug.libext.process.paths=${falcon.libext}
 
+debug.extension.store.uri=file://${user.dir}/target/recipe/store
+
 #Configurations used in ITs
 it.config.store.uri=file://${user.dir}/target/store
 it.config.oozie.conf.uri=${user.dir}/target/oozie

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/pom.xml
----------------------------------------------------------------------
diff --git a/extensions/pom.xml b/extensions/pom.xml
new file mode 100644
index 0000000..6a0725a
--- /dev/null
+++ b/extensions/pom.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.falcon</groupId>
+        <artifactId>falcon-main</artifactId>
+        <version>0.10-SNAPSHOT</version>
+    </parent>
+    <artifactId>falcon-extensions</artifactId>
+    <description>Apache Falcon server side extensions module</description>
+    <name>Apache Falcon extensions</name>
+    <packaging>jar</packaging>
+
+    <profiles>
+        <profile>
+            <id>hadoop-2</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <dependencies>
+                <dependency>
+                    <groupId>org.apache.hadoop</groupId>
+                    <artifactId>hadoop-client</artifactId>
+                </dependency>
+            </dependencies>
+        </profile>
+    </profiles>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-client</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-common</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.testng</groupId>
+            <artifactId>testng</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>2.4</version>
+                <configuration>
+                    <excludes>
+                        <exclude>**/log4j.xml</exclude>
+                    </excludes>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java
new file mode 100644
index 0000000..11b3725
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/AbstractExtension.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.apache.falcon.FalconException;
+import org.apache.falcon.extensions.mirroring.hdfs.HdfsMirroringExtension;
+import org.apache.falcon.extensions.mirroring.hive.HiveMirroringExtension;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Abstract extension class to be extended by all trusted and custom extensions.
+ */
+public abstract class AbstractExtension {
+    private static final List<String> TRUSTED_EXTENSIONS = Arrays.asList(
+            new HdfsMirroringExtension().getName().toUpperCase(),
+            new HiveMirroringExtension().getName().toUpperCase());
+    private static List<AbstractExtension> extensions = new ArrayList<>();
+
+    public static List<AbstractExtension> getExtensions() {
+        if (extensions.isEmpty()) {
+            extensions.add(new HdfsMirroringExtension());
+            extensions.add(new HiveMirroringExtension());
+        }
+        return extensions;
+    }
+
+    public static boolean isExtensionTrusted(final String extensionName) {
+        return TRUSTED_EXTENSIONS.contains(extensionName.toUpperCase());
+    }
+
+    /* Name cannot be null */
+    public abstract String getName();
+
+    public abstract void validate(final Properties extensionProperties) throws FalconException;
+
+    public abstract Properties getAdditionalProperties(final Properties extensionProperties) throws FalconException;
+}
+
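A small sketch of how a caller can enumerate the built-in extensions and apply the trust check (assumes falcon-extensions is on the classpath):

import org.apache.falcon.extensions.AbstractExtension;

public class ListTrustedExtensions {
    public static void main(String[] args) {
        for (AbstractExtension extension : AbstractExtension.getExtensions()) {
            // Both HDFS-MIRRORING and HIVE-MIRRORING are registered as trusted.
            System.out.println(extension.getName() + " trusted="
                    + AbstractExtension.isExtensionTrusted(extension.getName()));
        }
    }
}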

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/Extension.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/Extension.java b/extensions/src/main/java/org/apache/falcon/extensions/Extension.java
new file mode 100644
index 0000000..4ab9f5d
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/Extension.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.entity.v0.Entity;
+import org.apache.falcon.extensions.store.ExtensionStore;
+import org.apache.falcon.extensions.util.ExtensionProcessBuilderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Extension class to construct well formed entities from the templates for trusted extensions.
+ */
+public class Extension implements ExtensionBuilder {
+    private static final Logger LOG = LoggerFactory.getLogger(Extension.class);
+    private static final String EXTENSION_WF_SUFFIX = "-workflow.xml";
+    private static final String EXTENSION_TEMPLATE_SUFFIX = "-template.xml";
+
+    private static void validateProperties(final Properties extensionProperties) throws FalconException {
+        for (ExtensionProperties option : ExtensionProperties.values()) {
+            if (extensionProperties.getProperty(option.getName()) == null && option.isRequired()) {
+                throw new FalconException("Missing extension property: " + option.getName());
+            }
+        }
+    }
+
+    private static String getExtensionTemplate(final ExtensionStore store,
+                                            final Map<String, String> extensionResources,
+                                            final String extensionName) throws FalconException {
+        return store.getExtensionResource(extensionResources.get(extensionName.toLowerCase()
+                + EXTENSION_TEMPLATE_SUFFIX));
+    }
+
+    private static String getWFPath(final Map<String, String> extensionResources,
+                                    final String extensionName) {
+        return extensionResources.get(extensionName.toLowerCase() + EXTENSION_WF_SUFFIX);
+    }
+
+    @Override
+    public List<Entity> getEntities(final String extensionName, final Properties extensionProperties)
+        throws FalconException {
+        if (StringUtils.isBlank(extensionName)) {
+            throw new FalconException("Extension name cannot be null or empty");
+        }
+        validateProperties(extensionProperties);
+
+        String name = extensionName.toLowerCase();
+        AbstractExtension extension = ExtensionFactory.getExtensionType(name);
+        if (extension != null) {
+            extension.validate(extensionProperties);
+            Properties props = extension.getAdditionalProperties(extensionProperties);
+            if (props != null && !props.isEmpty()) {
+                extensionProperties.putAll(props);
+            }
+        }
+
+        ExtensionStore store = ExtensionService.getExtensionStore();
+
+        String resourceName = extensionProperties.getProperty(ExtensionProperties.RESOURCE_NAME.getName());
+        if (StringUtils.isBlank(resourceName)) {
+            resourceName = name;
+        }
+
+        Map<String, String> extensionResources = store.getExtensionResources(name);
+        /* Get the resources */
+        String extensionTemplate = getExtensionTemplate(store, extensionResources, resourceName);
+        String wfPath = getWFPath(extensionResources, resourceName);
+
+        /* Get Lib path */
+        String wfLibPath = store.getExtensionLibPath(name);
+        Entity entity = ExtensionProcessBuilderUtils.createProcessFromTemplate(extensionTemplate,
+                name, extensionProperties, wfPath, wfLibPath);
+        if (entity == null) {
+            throw new FalconException("Entity created from the extension template cannot be null");
+        }
+        LOG.info("Extension processing complete");
+        return Arrays.asList(entity);
+    }
+}
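A usage sketch for getEntities(), assuming a running server where ExtensionService is initialized, the hdfs-mirroring resources are staged in the extension store, and the named cluster entities exist; all values are examples:

import java.util.List;
import java.util.Properties;
import org.apache.falcon.entity.v0.Entity;
import org.apache.falcon.extensions.Extension;

public class BuildMirrorEntities {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Required generic options (see ExtensionProperties).
        props.setProperty("jobName", "hdfs-mirror-demo");
        props.setProperty("jobClusterName", "backupCluster");
        props.setProperty("jobValidityStart", "2016-04-01T00:00Z");
        props.setProperty("jobValidityEnd", "2016-12-31T00:00Z");
        props.setProperty("jobFrequency", "minutes(60)");
        // Required hdfs-mirroring options (see HdfsMirroringExtensionProperties).
        props.setProperty("sourceDir", "/apps/falcon/demo/in");
        props.setProperty("sourceCluster", "primaryCluster");
        props.setProperty("targetDir", "/apps/falcon/demo/out");
        props.setProperty("targetCluster", "backupCluster");

        List<Entity> entities = new Extension().getEntities("hdfs-mirroring", props);
        for (Entity entity : entities) {
            System.out.println("Built entity: " + entity.getName());
        }
    }
}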

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/ExtensionBuilder.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/ExtensionBuilder.java b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionBuilder.java
new file mode 100644
index 0000000..bd52ed4
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionBuilder.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.apache.falcon.FalconException;
+import org.apache.falcon.entity.v0.Entity;
+
+import java.util.Properties;
+import java.util.List;
+
+/**
+ * Extension interface to be implemented by all trusted and custom extensions.
+ */
+public interface ExtensionBuilder {
+    List<Entity> getEntities(final String extensionName, final Properties extensionProperties) throws FalconException;
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/ExtensionFactory.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/ExtensionFactory.java b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionFactory.java
new file mode 100644
index 0000000..41e0648
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionFactory.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.falcon.FalconException;
+
+import java.util.List;
+
+/**
+ * Extension Factory class to get the corresponding extension.
+ */
+public final class ExtensionFactory {
+
+    private ExtensionFactory() {
+    }
+
+    public static AbstractExtension getExtensionType(final String extensionName) throws FalconException {
+        if (StringUtils.isBlank(extensionName)) {
+            return null;
+        }
+
+        List<AbstractExtension> extensions = AbstractExtension.getExtensions();
+
+        for (AbstractExtension extension : extensions) {
+            if (extensionName.equalsIgnoreCase(extension.getName())) {
+                return extension;
+            }
+        }
+        return null;
+    }
+}
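Sketch of a factory lookup followed by validation; with an empty Properties this is expected to fail fast on the first missing required option:

import java.util.Properties;
import org.apache.falcon.extensions.AbstractExtension;
import org.apache.falcon.extensions.ExtensionFactory;

public class ValidateExtensionProps {
    public static void main(String[] args) throws Exception {
        AbstractExtension extension = ExtensionFactory.getExtensionType("hdfs-mirroring");
        if (extension == null) {
            throw new IllegalArgumentException("Unknown extension: hdfs-mirroring");
        }
        // Throws FalconException("Missing extension property: ...") for the first gap.
        extension.validate(new Properties());
    }
}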

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/ExtensionProperties.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/ExtensionProperties.java b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionProperties.java
new file mode 100644
index 0000000..b543d73
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionProperties.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Extension options.
+ */
+public enum ExtensionProperties {
+    JOB_NAME("jobName", "job name"),
+    RESOURCE_NAME("resourceName", "resource name", false),
+    // Can't default the cluster to the target, as extensions are generic and need not be used only for replication
+    CLUSTER_NAME("jobClusterName", "Cluster name where job should run"),
+    VALIDITY_START("jobValidityStart", "Job validity start"),
+    VALIDITY_END("jobValidityEnd", "Job validity end"),
+    FREQUENCY("jobFrequency", "Job frequency"),
+    TIMEZONE("jobTimezone", "Time zone", false),
+    // Use defaults for retry
+    RETRY_POLICY("jobRetryPolicy", "Retry policy", false),
+    RETRY_DELAY("jobRetryDelay", "Retry delay", false),
+    RETRY_ATTEMPTS("jobRetryAttempts", "Retry attempts", false),
+    RETRY_ON_TIMEOUT("jobRetryOnTimeout", "Retry onTimeout", false),
+    JOB_TAGS("jobTags", "Job tags", false),
+    JOB_ACL_OWNER("jobAclOwner", "Job acl owner", false),
+    JOB_ACL_GROUP("jobAclGroup", "Job acl group", false),
+    JOB_ACL_PERMISSION("jobAclPermission", "Job acl permission", false),
+    JOB_NOTIFICATION_TYPE("jobNotificationType", "Notification Type", false),
+    JOB_NOTIFICATION_ADDRESS("jobNotificationReceivers", "Email Notification receivers", false);
+
+    private final String name;
+    private final String description;
+    private final boolean isRequired;
+
+    private static Map<String, ExtensionProperties> optionsMap = new HashMap<>();
+    static {
+        for (ExtensionProperties c : ExtensionProperties.values()) {
+            optionsMap.put(c.getName(), c);
+        }
+    }
+
+    public static Map<String, ExtensionProperties> getOptionsMap() {
+        return optionsMap;
+    }
+
+    ExtensionProperties(String name, String description) {
+        this(name, description, true);
+    }
+
+    ExtensionProperties(String name, String description, boolean isRequired) {
+        this.name = name;
+        this.description = description;
+        this.isRequired = isRequired;
+    }
+
+    public String getName() {
+        return this.name;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public boolean isRequired() {
+        return isRequired;
+    }
+
+    @Override
+    public String toString() {
+        return getName();
+    }
+}
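Sketch of using the options map to report missing required options before submission (values are examples):

import java.util.Map;
import java.util.Properties;
import org.apache.falcon.extensions.ExtensionProperties;

public class MissingOptions {
    public static void main(String[] args) {
        Properties supplied = new Properties();
        supplied.setProperty(ExtensionProperties.JOB_NAME.getName(), "demo-job");

        for (Map.Entry<String, ExtensionProperties> entry
                : ExtensionProperties.getOptionsMap().entrySet()) {
            ExtensionProperties option = entry.getValue();
            if (option.isRequired() && supplied.getProperty(option.getName()) == null) {
                System.out.println("Missing: " + option.getName()
                        + " (" + option.getDescription() + ")");
            }
        }
    }
}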

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/ExtensionService.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/ExtensionService.java b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionService.java
new file mode 100644
index 0000000..ffed6ca
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/ExtensionService.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions;
+
+import org.apache.falcon.FalconException;
+import org.apache.falcon.extensions.store.ExtensionStore;
+import org.apache.falcon.service.FalconService;
+
+/**
+ * Extension service.
+ */
+public class ExtensionService implements FalconService {
+    private static ExtensionStore extensionStore = ExtensionStore.get();
+
+    public static final String SERVICE_NAME = ExtensionService.class.getSimpleName();
+
+    @Override
+    public String getName() {
+        return SERVICE_NAME;
+    }
+
+    @Override
+    public void init() throws FalconException {
+    }
+
+    @Override
+    public void destroy() throws FalconException {
+    }
+
+    public static ExtensionStore getExtensionStore() {
+        return extensionStore;
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java
new file mode 100644
index 0000000..f1acae2
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.mirroring.hdfs;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.entity.ClusterHelper;
+import org.apache.falcon.entity.v0.cluster.Cluster;
+import org.apache.falcon.extensions.AbstractExtension;
+
+import java.util.Properties;
+
+/**
+ * Hdfs mirroring extension.
+ */
+public class HdfsMirroringExtension extends AbstractExtension {
+
+    private static final String COMMA_SEPARATOR = ",";
+    private static final String EXTENSION_NAME = "HDFS-MIRRORING";
+
+    @Override
+    public String getName() {
+        return EXTENSION_NAME;
+    }
+
+    @Override
+    public void validate(final Properties extensionProperties) throws FalconException {
+        for (HdfsMirroringExtensionProperties option : HdfsMirroringExtensionProperties.values()) {
+            if (extensionProperties.getProperty(option.getName()) == null && option.isRequired()) {
+                throw new FalconException("Missing extension property: " + option.getName());
+            }
+        }
+    }
+
+    @Override
+    public Properties getAdditionalProperties(final Properties extensionProperties) throws FalconException {
+        Properties additionalProperties = new Properties();
+
+        // Add default properties if not passed
+        String distcpMaxMaps = extensionProperties.getProperty(HdfsMirroringExtensionProperties.MAX_MAPS.getName());
+        if (StringUtils.isBlank(distcpMaxMaps)) {
+            additionalProperties.put(HdfsMirroringExtensionProperties.MAX_MAPS.getName(), "1");
+        }
+
+        String distcpMapBandwidth = extensionProperties.getProperty(
+                HdfsMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName());
+        if (StringUtils.isBlank(distcpMapBandwidth)) {
+            additionalProperties.put(HdfsMirroringExtensionProperties.MAP_BANDWIDTH_IN_MB.getName(), "100");
+        }
+
+        // Construct fully qualified hdfs src path
+        String srcPaths = extensionProperties.getProperty(HdfsMirroringExtensionProperties
+                .SOURCE_DIR.getName());
+        StringBuilder absoluteSrcPaths = new StringBuilder();
+        String sourceClusterName = extensionProperties.getProperty(
+                HdfsMirroringExtensionProperties.SOURCE_CLUSTER.getName());
+
+        // Use the read interface endpoint since this is the source cluster
+        Cluster srcCluster = ClusterHelper.getCluster(sourceClusterName);
+        if (srcCluster == null) {
+            throw new FalconException("Cluster entity " + sourceClusterName + " not found");
+        }
+        String srcClusterEndPoint = ClusterHelper.getReadOnlyStorageUrl(srcCluster);
+
+        if (StringUtils.isNotBlank(srcPaths)) {
+            String[] paths = srcPaths.split(COMMA_SEPARATOR);
+
+            for (String path : paths) {
+                StringBuilder srcpath = new StringBuilder(srcClusterEndPoint);
+                srcpath.append(path.trim());
+                srcpath.append(COMMA_SEPARATOR);
+                absoluteSrcPaths.append(srcpath);
+            }
+        }
+
+        additionalProperties.put(HdfsMirroringExtensionProperties.SOURCE_DIR.getName(),
+                StringUtils.removeEnd(absoluteSrcPaths.toString(), COMMA_SEPARATOR));
+
+        // add sourceClusterFS and targetClusterFS
+        additionalProperties.put(HdfsMirroringExtensionProperties.SOURCE_CLUSTER_FS_WRITE_ENDPOINT.getName(),
+                ClusterHelper.getStorageUrl(srcCluster));
+
+        String targetClusterName = extensionProperties.getProperty(
+                HdfsMirroringExtensionProperties.TARGET_CLUSTER.getName());
+
+        Cluster targetCluster = ClusterHelper.getCluster(targetClusterName);
+        if (targetCluster == null) {
+            throw new FalconException("Cluster entity " + targetClusterName + " not found");
+        }
+        additionalProperties.put(HdfsMirroringExtensionProperties.TARGET_CLUSTER_FS_WRITE_ENDPOINT.getName(),
+                ClusterHelper.getStorageUrl(targetCluster));
+        return additionalProperties;
+    }
+
+}
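Usage sketch, assuming cluster entities named primaryCluster and backupCluster are already registered so ClusterHelper.getCluster() can resolve them:

import java.util.Properties;
import org.apache.falcon.extensions.mirroring.hdfs.HdfsMirroringExtension;

public class HdfsMirrorProps {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("sourceDir", "/apps/falcon/demo/in");
        props.setProperty("sourceCluster", "primaryCluster");
        props.setProperty("targetDir", "/apps/falcon/demo/out");
        props.setProperty("targetCluster", "backupCluster");

        HdfsMirroringExtension extension = new HdfsMirroringExtension();
        extension.validate(props);
        // Adds distcp defaults (maxMaps=1, bandwidth=100), the fully qualified
        // source paths, and the sourceClusterFS/targetClusterFS write endpoints.
        Properties additional = extension.getAdditionalProperties(props);
        additional.list(System.out);
    }
}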

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java
----------------------------------------------------------------------
diff --git a/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java
new file mode 100644
index 0000000..7d24b45
--- /dev/null
+++ b/extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtensionProperties.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.extensions.mirroring.hdfs;
+
+/**
+ * HDFS mirroring extension properties.
+ */
+public enum HdfsMirroringExtensionProperties {
+    SOURCE_DIR("sourceDir", "Location of source data to replicate"),
+    SOURCE_CLUSTER("sourceCluster", "Source cluster"),
+    SOURCE_CLUSTER_FS_WRITE_ENDPOINT("sourceClusterFS", "Source cluster filesystem write endpoint", false),
+    TARGET_DIR("targetDir", "Location on target cluster for replication"),
+    TARGET_CLUSTER("targetCluster", "Target cluster"),
+    TARGET_CLUSTER_FS_WRITE_ENDPOINT("targetClusterFS", "Target cluster filesystem write endpoint", false),
+    MAX_MAPS("distcpMaxMaps", "Maximum number of maps used during replication", false),
+    MAP_BANDWIDTH_IN_MB("distcpMapBandwidth", "Bandwidth in MB/s used by each mapper during replication",
+            false);
+
+    private final String name;
+    private final String description;
+    private final boolean isRequired;
+
+    HdfsMirroringExtensionProperties(String name, String description) {
+        this(name, description, true);
+    }
+
+    HdfsMirroringExtensionProperties(String name, String description, boolean isRequired) {
+        this.name = name;
+        this.description = description;
+        this.isRequired = isRequired;
+    }
+
+    public String getName() {
+        return this.name;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+
+    public boolean isRequired() {
+        return isRequired;
+    }
+
+    @Override
+    public String toString() {
+        return getName();
+    }
+}
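
The isRequired flag is what lets a caller validate a user-supplied property set generically. A minimal sketch of such a check (illustrative only, not the server-side code; assumes the class sits in the same package as the enum):

    import java.util.Properties;

    public final class RequiredPropertiesSketch {
        // Fails fast when any property marked required in the enum is absent
        static void check(Properties props) {
            for (HdfsMirroringExtensionProperties p : HdfsMirroringExtensionProperties.values()) {
                if (p.isRequired() && props.getProperty(p.getName()) == null) {
                    throw new IllegalArgumentException("Missing extension property: " + p.getName());
                }
            }
        }
    }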


[3/3] falcon git commit: FALCON-1107 Move trusted extensions processing to server side

Posted by ba...@apache.org.
FALCON-1107 Move trusted extensions processing to server side

Ignore any documentation issues as they will be addressed in https://issues.apache.org/jira/browse/FALCON-1106. Thanks!

Author: Sowmya Ramesh <sr...@hortonworks.com>

Reviewers: Balu Vellanki <ba...@apache.org>, Venkat Ranganathan <ve...@hortonworks.com>

Closes #92 from sowmyaramesh/FALCON-1107


Project: http://git-wip-us.apache.org/repos/asf/falcon/repo
Commit: http://git-wip-us.apache.org/repos/asf/falcon/commit/95bf312f
Tree: http://git-wip-us.apache.org/repos/asf/falcon/tree/95bf312f
Diff: http://git-wip-us.apache.org/repos/asf/falcon/diff/95bf312f

Branch: refs/heads/master
Commit: 95bf312f46bc96bc247645da6500b495c21aede3
Parents: c52961c
Author: Sowmya Ramesh <sr...@hortonworks.com>
Authored: Tue Apr 12 16:05:48 2016 -0700
Committer: bvellanki <bv...@hortonworks.com>
Committed: Tue Apr 12 16:05:48 2016 -0700

----------------------------------------------------------------------
 addons/extensions/hdfs-mirroring/README         |  29 ++
 addons/extensions/hdfs-mirroring/pom.xml        |  32 ++
 .../main/META/hdfs-mirroring-properties.json    | 137 +++++++
 .../runtime/hdfs-mirroring-template.xml         |  45 +++
 .../runtime/hdfs-mirroring-workflow.xml         |  82 +++++
 addons/extensions/hive-mirroring/README         |  58 +++
 addons/extensions/hive-mirroring/pom.xml        |  32 ++
 .../main/META/hive-mirroring-properties.json    | 179 +++++++++
 .../META/hive-mirroring-secure-properties.json  | 191 ++++++++++
 .../runtime/hive-mirroring-secure-template.xml  |  45 +++
 .../runtime/hive-mirroring-secure-workflow.xml  | 363 +++++++++++++++++++
 .../runtime/hive-mirroring-template.xml         |  45 +++
 .../runtime/hive-mirroring-workflow.xml         | 255 +++++++++++++
 .../java/org/apache/falcon/hive/HiveDRArgs.java |   9 +-
 .../org/apache/falcon/hive/HiveDROptions.java   |  38 +-
 addons/recipes/hdfs-replication/README.txt      |  29 --
 addons/recipes/hdfs-replication/pom.xml         |  32 --
 .../resources/hdfs-replication-template.xml     |  44 ---
 .../resources/hdfs-replication-workflow.xml     |  82 -----
 .../main/resources/hdfs-replication.properties  |  79 ----
 .../recipes/hive-disaster-recovery/README.txt   |  58 ---
 addons/recipes/hive-disaster-recovery/pom.xml   |  32 --
 .../hive-disaster-recovery-secure-template.xml  |  45 ---
 .../hive-disaster-recovery-secure-workflow.xml  | 363 -------------------
 .../hive-disaster-recovery-secure.properties    | 110 ------
 .../hive-disaster-recovery-template.xml         |  45 ---
 .../hive-disaster-recovery-workflow.xml         | 249 -------------
 .../resources/hive-disaster-recovery.properties |  98 -----
 .../falcon/catalog/AbstractCatalogService.java  |  12 +
 .../falcon/catalog/HiveCatalogService.java      |  16 +
 common/src/main/resources/startup.properties    |   2 +
 extensions/pom.xml                              | 112 ++++++
 .../falcon/extensions/AbstractExtension.java    |  58 +++
 .../org/apache/falcon/extensions/Extension.java | 102 ++++++
 .../falcon/extensions/ExtensionBuilder.java     |  32 ++
 .../falcon/extensions/ExtensionFactory.java     |  48 +++
 .../falcon/extensions/ExtensionProperties.java  |  89 +++++
 .../falcon/extensions/ExtensionService.java     |  49 +++
 .../mirroring/hdfs/HdfsMirroringExtension.java  | 111 ++++++
 .../hdfs/HdfsMirroringExtensionProperties.java  |  65 ++++
 .../mirroring/hive/HiveMirroringExtension.java  | 231 ++++++++++++
 .../hive/HiveMirroringExtensionProperties.java  |  92 +++++
 .../falcon/extensions/store/ExtensionStore.java | 215 +++++++++++
 .../util/ExtensionProcessBuilderUtils.java      | 309 ++++++++++++++++
 .../falcon/extensions/ExtensionServiceTest.java |  53 +++
 .../apache/falcon/extensions/ExtensionTest.java | 160 ++++++++
 .../store/AbstractTestExtensionStore.java       | 103 ++++++
 .../extensions/store/ExtensionStoreTest.java    |  65 ++++
 .../src/test/resources/backup-cluster-0.1.xml   |  44 +++
 .../test/resources/hdfs-mirroring-template.xml  |  45 +++
 .../test/resources/hive-mirroring-template.xml  |  45 +++
 .../src/test/resources/primary-cluster-0.1.xml  |  44 +++
 oozie/pom.xml                                   |   6 +
 .../service/SharedLibraryHostingService.java    |  91 ++++-
 pom.xml                                         |   2 +
 src/main/assemblies/distributed-package.xml     |  79 +++-
 src/main/assemblies/standalone-package.xml      |  80 +++-
 57 files changed, 3851 insertions(+), 1315 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hdfs-mirroring/README
----------------------------------------------------------------------
diff --git a/addons/extensions/hdfs-mirroring/README b/addons/extensions/hdfs-mirroring/README
new file mode 100644
index 0000000..78f1726
--- /dev/null
+++ b/addons/extensions/hdfs-mirroring/README
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+HDFS Directory Replication Extension
+
+Overview
+This extension replicates arbitrary directories on HDFS from one
+Hadoop cluster to another.
+It piggybacks on the replication solution in Falcon, which uses the DistCp tool.
+
+Use Case
+* Copy directories without dated partitions between HDFS clusters
+* Archive directories from HDFS to cloud storage, e.g. S3 or Azure WASB
+
+Limitations
+As the data volume and number of files grow, replication can become inefficient.

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hdfs-mirroring/pom.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hdfs-mirroring/pom.xml b/addons/extensions/hdfs-mirroring/pom.xml
new file mode 100644
index 0000000..cb9304e
--- /dev/null
+++ b/addons/extensions/hdfs-mirroring/pom.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.falcon.extensions</groupId>
+    <artifactId>falcon-hdfs-mirroring-extension</artifactId>
+    <version>0.10-SNAPSHOT</version>
+    <description>Apache Falcon sample Hdfs mirroring extension</description>
+    <name>Apache Falcon sample Hdfs mirroring extension</name>
+    <packaging>jar</packaging>
+</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json
----------------------------------------------------------------------
diff --git a/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json b/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json
new file mode 100644
index 0000000..f1b4775
--- /dev/null
+++ b/addons/extensions/hdfs-mirroring/src/main/META/hdfs-mirroring-properties.json
@@ -0,0 +1,137 @@
+{
+    "shortDescription": "This extension implements replicating arbitrary directories on HDFS from one Hadoop cluster to another Hadoop cluster. This piggy backs on replication solution in Falcon which uses the DistCp tool.",
+    "properties":[
+        {
+            "propertyName":"jobName",
+            "required":true,
+            "description":"Unique job name",
+            "example":"hdfs-monthly-sales-dr"
+        },
+        {
+            "propertyName":"jobClusterName",
+            "required":true,
+            "description":"Cluster where job should run",
+            "example":"backupCluster"
+        },
+        {
+            "propertyName":"jobValidityStart",
+            "required":true,
+            "description":"Job validity start time",
+            "example":"2016-03-03T00:00Z"
+        },
+        {
+            "propertyName":"jobValidityEnd",
+            "required":true,
+            "description":"Job validity end time",
+            "example":"2018-03-13T00:00Z"
+        },
+        {
+            "propertyName":"jobFrequency",
+            "required":true,
+            "description":"job frequency. Valid frequency types are minutes, hours, days, months",
+            "example":"months(1)"
+        },
+        {
+            "propertyName":"jobTimezone",
+            "required":false,
+            "description":"Time zone for the job",
+            "example":"GMT"
+        },
+        {
+            "propertyName":"jobTags",
+            "required":false,
+            "description":"list of comma separated tags. Key Value Pairs, separated by comma",
+            "example":"consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting"
+        },
+        {
+            "propertyName":"jobRetryPolicy",
+            "required":false,
+            "description":"Job retry policy",
+            "example":"periodic"
+        },
+        {
+            "propertyName":"jobRetryDelay",
+            "required":false,
+            "description":"Job retry delay",
+            "example":"minutes(30)"
+        },
+        {
+            "propertyName":"jobRetryAttempts",
+            "required":false,
+            "description":"Job retry attempts",
+            "example":"3"
+        },
+        {
+            "propertyName":"jobRetryOnTimeout",
+            "required":false,
+            "description":"Job retry on timeout",
+            "example":"true"
+        },
+        {
+            "propertyName":"jobAclOwner",
+            "required":false,
+            "description":"ACL owner",
+            "example":"ambari-qa"
+        },
+        {
+            "propertyName":"jobAclGroup",
+            "required":false,
+            "description":"ACL group",
+            "example":"users"
+        },
+        {
+            "propertyName":"jobAclPermission",
+            "required":false,
+            "description":"ACL permission",
+            "example":"0x755"
+        },
+        {
+            "propertyName":"sourceDir",
+            "required":true,
+            "description":"Multiple hdfs comma separated source directories",
+            "example":"/user/ambari-qa/primaryCluster/dr/input1, /user/ambari-qa/primaryCluster/dr/input2"
+        },
+        {
+            "propertyName":"sourceCluster",
+            "required":true,
+            "description":"Source cluster for hdfs mirroring",
+            "example":"primaryCluster"
+        },
+        {
+            "propertyName":"targetDir",
+            "required":true,
+            "description":"Target hdfs directory",
+            "example":"/user/ambari-qa/backupCluster/dr"
+        },
+        {
+            "propertyName":"targetCluster",
+            "required":true,
+            "description":"Target cluster for hdfs mirroring",
+            "example":"backupCluster"
+        },
+        {
+            "propertyName":"distcpMaxMaps",
+            "required":false,
+            "description":"Maximum number of mappers for DistCP",
+            "example":"1"
+        },
+        {
+            "propertyName":"distcpMapBandwidth",
+            "required":false,
+            "description":"Bandwidth in MB for each mapper in DistCP",
+            "example":"100"
+        },
+        {
+            "propertyName":"jobNotificationType",
+            "required":false,
+            "description":"Email Notification for Falcon instance completion",
+            "example":"email"
+        },
+        {
+            "propertyName":"jobNotificationReceivers",
+            "required":false,
+            "description":"Comma separated email Id's",
+            "example":"user1@gmail.com, user2@gmail.com"
+        }
+    ]
+}
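
Pulling the documented examples above together, a minimal hdfs-mirroring job properties file might look like the sketch below; every value is the example from the JSON, not a default:

    # Minimal hdfs-mirroring job properties (illustrative; values are the documented examples)
    jobName=hdfs-monthly-sales-dr
    jobClusterName=backupCluster
    jobValidityStart=2016-03-03T00:00Z
    jobValidityEnd=2018-03-13T00:00Z
    jobFrequency=months(1)
    sourceCluster=primaryCluster
    sourceDir=/user/ambari-qa/primaryCluster/dr/input1
    targetCluster=backupCluster
    targetDir=/user/ambari-qa/backupCluster/dr
    # Optional DistCp tuning; when omitted the extension defaults these to 1 and 100
    distcpMaxMaps=1
    distcpMapBandwidth=100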

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-template.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-template.xml b/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-template.xml
new file mode 100644
index 0000000..d511d00
--- /dev/null
+++ b/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-template.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##jobName##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##jobClusterName##">
+            <validity end="##jobValidityEnd##" start="##jobValidityStart##"/>
+        </cluster>
+    </clusters>
+
+    <tags/>
+
+    <parallel>1</parallel>
+    <!-- Dir replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##jobFrequency##</frequency>
+    <timezone>##jobTimezone##</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##jobWorkflowName##" engine="##jobWorkflowEngine##"
+              path="##jobWorkflowPath##" lib="##jobWorkflowLibPath##"/>
+    <retry policy="##jobRetryPolicy##" delay="##jobRetryDelay##" attempts="3"/>
+    <notification type="##jobNotificationType##" to="##jobNotificationReceivers##"/>
+    <ACL/>
+</process>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml b/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml
new file mode 100644
index 0000000..1e2282c
--- /dev/null
+++ b/addons/extensions/hdfs-mirroring/src/main/resources/runtime/hdfs-mirroring-workflow.xml
@@ -0,0 +1,82 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-fs-workflow'>
+    <start to='dr-replication'/>
+    <!-- Replication action -->
+    <action name="dr-replication">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.oozie.libpath</name>
+                    <value>${wf:conf("falcon.libpath")}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
+                    <value>${sourceClusterFS},${targetClusterFS}</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.replication.FeedReplicator</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-maxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-mapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-sourcePaths</arg>
+            <arg>${sourceDir}</arg>
+            <arg>-targetPath</arg>
+            <arg>${targetClusterFS}${targetDir}</arg>
+            <arg>-falconFeedStorageType</arg>
+            <arg>FILESYSTEM</arg>
+            <arg>-availabilityFlag</arg>
+            <arg>${availabilityFlag == 'NA' ? "NA" : availabilityFlag}</arg>
+            <arg>-counterLogDir</arg>
+            <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}</arg>
+        </java>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>
+            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
+        </message>
+    </kill>
+    <end name="end"/>
+</workflow-app>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/README
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/README b/addons/extensions/hive-mirroring/README
new file mode 100644
index 0000000..827f7e5
--- /dev/null
+++ b/addons/extensions/hive-mirroring/README
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Hive Metastore Disaster Recovery Extension
+
+Overview
+This extension replicates Hive metadata and data from one
+Hadoop cluster to another.
+It piggybacks on the replication solution in Falcon, which uses the DistCp tool.
+
+Use Case
+*
+*
+
+Limitations
+*

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/pom.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/pom.xml b/addons/extensions/hive-mirroring/pom.xml
new file mode 100644
index 0000000..adfb0be
--- /dev/null
+++ b/addons/extensions/hive-mirroring/pom.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.falcon.extensions</groupId>
+    <artifactId>falcon-hive-mirroring-extension</artifactId>
+    <version>0.10-SNAPSHOT</version>
+    <description>Apache Falcon sample Hive mirroring extension</description>
+    <name>Apache Falcon sample Hive mirroring extension</name>
+    <packaging>jar</packaging>
+</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json b/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json
new file mode 100644
index 0000000..a9f3d1b
--- /dev/null
+++ b/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-properties.json
@@ -0,0 +1,179 @@
+{
+    "shortDescription":"This extension implements replicating hive metadata and data from one Hadoop cluster to another Hadoop cluster.",
+    "properties":[
+        {
+            "propertyName":"jobName",
+            "required":true,
+            "description":"Unique job name",
+            "example":"hive-monthly-sales-dr"
+        },
+        {
+            "propertyName":"jobClusterName",
+            "required":true,
+            "description":"Cluster where job should run",
+            "example":"backupCluster"
+        },
+        {
+            "propertyName":"jobValidityStart",
+            "required":true,
+            "description":"Job validity start time",
+            "example":"2016-03-03T00:00Z"
+        },
+        {
+            "propertyName":"jobValidityEnd",
+            "required":true,
+            "description":"Job validity end time",
+            "example":"2018-03-13T00:00Z"
+        },
+        {
+            "propertyName":"jobFrequency",
+            "required":true,
+            "description":"job frequency. Valid frequency types are minutes, hours, days, months",
+            "example":"months(1)"
+        },
+        {
+            "propertyName":"jobTimezone",
+            "required":false,
+            "description":"Time zone for the job",
+            "example":"GMT"
+        },
+        {
+            "propertyName":"jobTags",
+            "required":false,
+            "description":"list of comma separated tags. Key Value Pairs, separated by comma",
+            "example":"consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting"
+        },
+        {
+            "propertyName":"jobRetryPolicy",
+            "required":false,
+            "description":"Job retry policy",
+            "example":"periodic"
+        },
+        {
+            "propertyName":"jobRetryDelay",
+            "required":false,
+            "description":"Job retry delay",
+            "example":"minutes(30)"
+        },
+        {
+            "propertyName":"jobRetryAttempts",
+            "required":false,
+            "description":"Job retry attempts",
+            "example":"3"
+        },
+        {
+            "propertyName":"jobRetryOnTimeout",
+            "required":false,
+            "description":"Job retry on timeout",
+            "example":true
+        },
+        {
+            "propertyName":"jobAclOwner",
+            "required":false,
+            "description":"ACL owner",
+            "example":"ambari-qa"
+        },
+        {
+            "propertyName":"jobAclGroup",
+            "required":false,
+            "description":"ACL group",
+            "example":"users"
+        },
+        {
+            "propertyName":"jobAclPermission",
+            "required":false,
+            "description":"ACL permission",
+            "example":"0x755"
+        },
+        {
+            "propertyName":"sourceCluster",
+            "required":true,
+            "description":"Source cluster for hive mirroring",
+            "example":"primaryCluster"
+        },
+        {
+            "propertyName":"sourceHiveServer2Uri",
+            "required":true,
+            "description":"Hive2 server end point",
+            "example":"hive2://localhost:10000"
+        },
+        {
+            "propertyName":"sourceDatabases",
+            "required":true,
+            "description":"For DB level replication specify multiple comma separated databases to replicate",
+            "example":"salesDb"
+        },
+        {
+            "propertyName":"sourceTables",
+            "required":false,
+            "description":"For table level replication specify multiple comma separated tables to replicate",
+            "example":"monthly_sales1, monthly_sales2"
+        },
+        {
+            "propertyName":"sourceStagingPath",
+            "required":false,
+            "description":"Staging path on source",
+            "example":"/apps/hive/dr"
+        },
+        {
+            "propertyName":"targetCluster",
+            "required":true,
+            "description":"target cluster for hive mirroring",
+            "example":"backupCluster"
+        },
+        {
+            "propertyName":"targetHiveServer2Uri",
+            "required":true,
+            "description":"Hive2 server end point",
+            "example":"hive2://localhost:10000"
+        },
+        {
+            "propertyName":"targetStagingPath",
+            "required":false,
+            "description":"Staging path on target",
+            "example":"/apps/hive/dr"
+        },
+        {
+            "propertyName":"maxEvents",
+            "required":false,
+            "description":"To ceil the max events processed each time the job runs. Set it to max value depending on your bandwidth limit. Setting it to -1 will process all the events but can hog up the bandwidth. Use it judiciously!",
+            "example":"10000"
+        },
+        {
+            "propertyName":"replicationMaxMaps",
+            "required":false,
+            "description":"Maximum number of mappers to use for hive replication",
+            "example":"1"
+        },
+        {
+            "propertyName":"distcpMaxMaps",
+            "required":false,
+            "description":"Maximum number of mappers for DistCP",
+            "example":"1"
+        },
+        {
+            "propertyName":"distcpMapBandwidth",
+            "required":false,
+            "description":"Bandwidth in MB for each mapper in DistCP",
+            "example":"100"
+        },
+        {
+            "propertyName":"tdeEncryptionEnabled",
+            "required":false,
+            "description":"Set this flag to true if TDE encryption is enabled on source and target. Default value is false",
+            "example":"true"
+        },
+        {
+            "propertyName":"jobNotificationType",
+            "required":false,
+            "description":"Email Notification for Falcon instance completion",
+            "example":"email"
+        },
+        {
+            "propertyName":"jobNotificationReceivers",
+            "required":false,
+            "description":"Comma separated email Id's",
+            "example":"user1@gmail.com, user2@gmail.com"
+        }
+    ]
+}
\ No newline at end of file
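
As with the hdfs extension, the documented examples compose into a minimal hive-mirroring job properties sketch (illustrative values only, taken from the JSON above):

    jobName=hive-monthly-sales-dr
    jobClusterName=backupCluster
    jobValidityStart=2016-03-03T00:00Z
    jobValidityEnd=2018-03-13T00:00Z
    jobFrequency=months(1)
    sourceCluster=primaryCluster
    sourceHiveServer2Uri=hive2://localhost:10000
    sourceDatabases=salesDb
    # Omit sourceTables for DB-level replication; list tables for table-level replication
    sourceTables=monthly_sales1, monthly_sales2
    targetCluster=backupCluster
    targetHiveServer2Uri=hive2://localhost:10000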

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-secure-properties.json
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-secure-properties.json b/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-secure-properties.json
new file mode 100644
index 0000000..8ec03b5
--- /dev/null
+++ b/addons/extensions/hive-mirroring/src/main/META/hive-mirroring-secure-properties.json
@@ -0,0 +1,191 @@
+{
+    "shortDescription": "This extension implements replicating hive metadata and data from one Hadoop cluster to another Hadoop cluster in secure environment.",
+    "properties":[
+        {
+            "propertyName":"jobName",
+            "required":true,
+            "description":"Unique job name",
+            "example":"hive-monthly-sales-dr"
+        },
+        {
+            "propertyName":"jobClusterName",
+            "required":true,
+            "description":"Cluster where job should run",
+            "example":"backupCluster"
+        },
+        {
+            "propertyName":"jobValidityStart",
+            "required":true,
+            "description":"Job validity start time",
+            "example":"2016-03-03T00:00Z"
+        },
+        {
+            "propertyName":"jobValidityEnd",
+            "required":true,
+            "description":"Job validity end time",
+            "example":"2018-03-13T00:00Z"
+        },
+        {
+            "propertyName":"jobFrequency",
+            "required":true,
+            "description":"job frequency. Valid frequency types are minutes, hours, days, months",
+            "example":"months(1)"
+        },
+        {
+            "propertyName":"jobTimezone",
+            "required":false,
+            "description":"Time zone for the job",
+            "example":"GMT"
+        },
+        {
+            "propertyName":"jobTags",
+            "required":false,
+            "description":"list of comma separated tags. Key Value Pairs, separated by comma",
+            "example":"consumer=consumer@xyz.com, owner=producer@xyz.com, _department_type=forecasting"
+        },
+        {
+            "propertyName":"jobRetryPolicy",
+            "required":false,
+            "description":"Job retry policy",
+            "example":"periodic"
+        },
+        {
+            "propertyName":"jobRetryDelay",
+            "required":false,
+            "description":"Job retry delay",
+            "example":"minutes(30)"
+        },
+        {
+            "propertyName":"jobRetryAttempts",
+            "required":false,
+            "description":"Job retry attempts",
+            "example":"3"
+        },
+        {
+            "propertyName":"jobRetryOnTimeout",
+            "required":false,
+            "description":"Job retry on timeout",
+            "example":true
+        },
+        {
+            "propertyName":"jobAclOwner",
+            "required":false,
+            "description":"ACL owner",
+            "example":"ambari-qa"
+        },
+        {
+            "propertyName":"jobAclGroup",
+            "required":false,
+            "description":"ACL group",
+            "example":"users"
+        },
+        {
+            "propertyName":"jobAclPermission",
+            "required":false,
+            "description":"ACL permission",
+            "example":"0x755"
+        },
+        {
+            "propertyName":"sourceCluster",
+            "required":true,
+            "description":"Source cluster for hive mirroring",
+            "example":"primaryCluster"
+        },
+        {
+            "propertyName":"sourceHiveServer2Uri",
+            "required":true,
+            "description":"Hive2 server end point",
+            "example":"hive2://localhost:10000"
+        },
+        {
+            "propertyName":"sourceDatabases",
+            "required":true,
+            "description":"For DB level replication specify multiple comma separated databases to replicate",
+            "example":"salesDb"
+        },
+        {
+            "propertyName":"sourceTables",
+            "required":false,
+            "description":"For table level replication specify multiple comma separated tables to replicate",
+            "example":"monthly_sales1, monthly_sales2"
+        },
+        {
+            "propertyName":"sourceStagingPath",
+            "required":false,
+            "description":"Staging path on source",
+            "example":"/apps/hive/dr"
+        },
+        {
+            "propertyName":"sourceHive2KerberosPrincipal",
+            "required":true,
+            "description":"Required on secure clusters. Kerberos principal required to access hive servers ",
+            "example":"hive/_HOST@EXAMPLE.COM"
+        },
+        {
+            "propertyName":"targetCluster",
+            "required":true,
+            "description":"target cluster for hive mirroring",
+            "example":"backupCluster"
+        },
+        {
+            "propertyName":"targetHiveServer2Uri",
+            "required":true,
+            "description":"Hive2 server end point",
+            "example":"hive2://localhost:10000"
+        },
+        {
+            "propertyName":"targetStagingPath",
+            "required":false,
+            "description":"Staging path on target",
+            "example":"/apps/hive/dr"
+        },
+        {
+            "propertyName":"targetHive2KerberosPrincipal",
+            "required":true,
+            "description":"Required on secure clusters. Kerberos principal required to access hive servers ",
+            "example":"hive/_HOST@EXAMPLE.COM"
+        },
+        {
+            "propertyName":"maxEvents",
+            "required":false,
+            "description":"To ceil the max events processed each time the job runs. Set it to max value depending on your bandwidth limit. Setting it to -1 will process all the events but can hog up the bandwidth. Use it judiciously!",
+            "example":"10000"
+        },
+        {
+            "propertyName":"replicationMaxMaps",
+            "required":false,
+            "description":"Maximum number of mappers to use for hive replication",
+            "example":"1"
+        },
+        {
+            "propertyName":"distcpMaxMaps",
+            "required":false,
+            "description":"Maximum number of mappers for DistCP",
+            "example":"1"
+        },
+        {
+            "propertyName":"distcpMapBandwidth",
+            "required":false,
+            "description":"Bandwidth in MB for each mapper in DistCP",
+            "example":"100"
+        },
+        {
+            "propertyName":"tdeEncryptionEnabled",
+            "required":false,
+            "description":"Set this flag to true if TDE encryption is enabled on source and target. Default value is false",
+            "example":"true"
+        },
+        {
+            "propertyName":"jobNotificationType",
+            "required":false,
+            "description":"Email Notification for Falcon instance completion",
+            "example":"email"
+        },
+        {
+            "propertyName":"jobNotificationReceivers",
+            "required":false,
+            "description":"Comma separated email Id's",
+            "example":"user1@gmail.com, user2@gmail.com"
+        }
+    ]
+}
\ No newline at end of file
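
Comparing the two property sets, the secure variant differs from the non-secure one only in the two required Kerberos principals; per the documented examples:

    sourceHive2KerberosPrincipal=hive/_HOST@EXAMPLE.COM
    targetHive2KerberosPrincipal=hive/_HOST@EXAMPLE.COM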

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-template.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-template.xml b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-template.xml
new file mode 100644
index 0000000..4497bb4
--- /dev/null
+++ b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-template.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##jobName##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##jobClusterName##">
+            <validity end="##jobValidityEnd##" start="##jobValidityStart##"/>
+        </cluster>
+    </clusters>
+
+    <tags/>
+
+    <parallel>1</parallel>
+    <!-- Replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##jobFrequency##</frequency>
+    <timezone>##jobTimezone##</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##jobWorkflowName##" engine="##jobWorkflowEngine##"
+              path="##jobWorkflowPath##" lib="##jobWorkflowLibPath##"/>
+    <retry policy="##jobRetryPolicy##" delay="##jobRetryDelay##" attempts="3"/>
+    <notification type="##jobNotificationType##" to="##jobNotificationReceivers##"/>
+    <ACL/>
+</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml
new file mode 100644
index 0000000..4bf048f
--- /dev/null
+++ b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-secure-workflow.xml
@@ -0,0 +1,363 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
+    <credentials>
+        <credential name='hive_src_credentials' type='hcat'>
+            <property>
+                <name>hcat.metastore.uri</name>
+                <value>${sourceMetastoreUri}</value>
+            </property>
+            <property>
+                <name>hcat.metastore.principal</name>
+                <value>${sourceHiveMetastoreKerberosPrincipal}</value>
+            </property>
+        </credential>
+        <credential name='hive_tgt_credentials' type='hcat'>
+            <property>
+                <name>hcat.metastore.uri</name>
+                <value>${targetMetastoreUri}</value>
+            </property>
+            <property>
+                <name>hcat.metastore.principal</name>
+                <value>${targetHiveMetastoreKerberosPrincipal}</value>
+            </property>
+        </credential>
+        <credential name="hive2_src_credentials" type="hive2">
+            <property>
+                <name>hive2.server.principal</name>
+                <value>${sourceHive2KerberosPrincipal}</value>
+            </property>
+            <property>
+                <name>hive2.jdbc.url</name>
+                <value>jdbc:${sourceHiveServer2Uri}/${sourceDatabase}</value>
+            </property>
+        </credential>
+        <credential name="hive2_tgt_credentials" type="hive2">
+            <property>
+                <name>hive2.server.principal</name>
+                <value>${targetHive2KerberosPrincipal}</value>
+            </property>
+            <property>
+                <name>hive2.jdbc.url</name>
+                <value>jdbc:${targetHiveServer2Uri}/${sourceDatabase}</value>
+            </property>
+        </credential>
+    </credentials>
+    <start to='last-event'/>
+    <action name="last-event" cred="hive_tgt_credentials">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
+                    <value>${sourceNN},${targetNN}</value>
+                </property>
+                <property>
+                    <name>mapreduce.job.hdfs-servers</name>
+                    <value>${sourceNN},${targetNN}</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-sourceNNKerberosPrincipal</arg>
+            <arg>${sourceNNKerberosPrincipal}</arg>
+            <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
+            <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
+            <arg>-sourceHive2KerberosPrincipal</arg>
+            <arg>${sourceHive2KerberosPrincipal}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-targetNNKerberosPrincipal</arg>
+            <arg>${targetNNKerberosPrincipal}</arg>
+            <arg>-targetHiveMetastoreKerberosPrincipal</arg>
+            <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
+            <arg>-targetHive2KerberosPrincipal</arg>
+            <arg>${targetHive2KerberosPrincipal}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-clusterForJobNNKerberosPrincipal</arg>
+            <arg>${clusterForJobNNKerberosPrincipal}</arg>
+            <arg>-tdeEncryptionEnabled</arg>
+            <arg>${tdeEncryptionEnabled}</arg>
+            <arg>-jobName</arg>
+            <arg>${jobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>lastevents</arg>
+        </java>
+        <ok to="export-dr-replication"/>
+        <error to="fail"/>
+    </action>
+    <!-- Export Replication action -->
+    <action name="export-dr-replication" cred="hive_src_credentials,hive2_src_credentials">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
+                    <value>${sourceNN},${targetNN}</value>
+                </property>
+                <property>
+                    <name>mapreduce.job.hdfs-servers</name>
+                    <value>${sourceNN},${targetNN}</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-replicationMaxMaps</arg>
+            <arg>${replicationMaxMaps}</arg>
+            <arg>-distcpMaxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-sourceNNKerberosPrincipal</arg>
+            <arg>${sourceNNKerberosPrincipal}</arg>
+            <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
+            <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
+            <arg>-sourceHive2KerberosPrincipal</arg>
+            <arg>${sourceHive2KerberosPrincipal}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-targetNNKerberosPrincipal</arg>
+            <arg>${targetNNKerberosPrincipal}</arg>
+            <arg>-targetHiveMetastoreKerberosPrincipal</arg>
+            <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
+            <arg>-targetHive2KerberosPrincipal</arg>
+            <arg>${targetHive2KerberosPrincipal}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-distcpMapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-clusterForJobNNKerberosPrincipal</arg>
+            <arg>${clusterForJobNNKerberosPrincipal}</arg>
+            <arg>-tdeEncryptionEnabled</arg>
+            <arg>${tdeEncryptionEnabled}</arg>
+            <arg>-jobName</arg>
+            <arg>${jobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>export</arg>
+            <arg>-counterLogDir</arg>
+            <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}/</arg>
+        </java>
+        <ok to="import-dr-replication"/>
+        <error to="fail"/>
+    </action>
+    <!-- Import Replication action -->
+    <action name="import-dr-replication" cred="hive_tgt_credentials,hive2_tgt_credentials">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
+                    <value>${sourceNN},${targetNN}</value>
+                </property>
+                <property>
+                    <name>mapreduce.job.hdfs-servers</name>
+                    <value>${sourceNN},${targetNN}</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-replicationMaxMaps</arg>
+            <arg>${replicationMaxMaps}</arg>
+            <arg>-distcpMaxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-sourceNNKerberosPrincipal</arg>
+            <arg>${sourceNNKerberosPrincipal}</arg>
+            <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
+            <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
+            <arg>-sourceHive2KerberosPrincipal</arg>
+            <arg>${sourceHive2KerberosPrincipal}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-targetNNKerberosPrincipal</arg>
+            <arg>${targetNNKerberosPrincipal}</arg>
+            <arg>-targetHiveMetastoreKerberosPrincipal</arg>
+            <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
+            <arg>-targetHive2KerberosPrincipal</arg>
+            <arg>${targetHive2KerberosPrincipal}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-distcpMapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-clusterForJobNNKerberosPrincipal</arg>
+            <arg>${clusterForJobNNKerberosPrincipal}</arg>
+            <arg>-tdeEncryptionEnabled</arg>
+            <arg>${tdeEncryptionEnabled}</arg>
+            <arg>-jobName</arg>
+            <arg>${jobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>import</arg>
+        </java>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>
+            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
+        </message>
+    </kill>
+    <end name="end"/>
+</workflow-app>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-template.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-template.xml b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-template.xml
new file mode 100644
index 0000000..4497bb4
--- /dev/null
+++ b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-template.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="##jobName##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <!--  source  -->
+        <cluster name="##jobClusterName##">
+            <validity end="##jobValidityEnd##" start="##jobValidityStart##"/>
+        </cluster>
+    </clusters>
+
+    <tags/>
+
+    <parallel>1</parallel>
+    <!-- Replication needs to run only once to catch up -->
+    <order>LAST_ONLY</order>
+    <frequency>##jobFrequency##</frequency>
+    <timezone>##jobTimezone##</timezone>
+
+    <properties>
+        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
+    </properties>
+
+    <workflow name="##jobWorkflowName##" engine="##jobWorkflowEngine##"
+              path="##jobWorkflowPath##" lib="##jobWorkflowLibPath##"/>
+    <retry policy="##jobRetryPolicy##" delay="##jobRetryDelay##" attempts="3"/>
+    <notification type="##jobNotificationType##" to="##jobNotificationReceivers##"/>
+    <ACL/>
+</process>
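
The ##token## markers in the template above are placeholders that the server-side extension code fills in from user-supplied job properties before the process entity is submitted. A minimal, hypothetical Java sketch of that substitution step (the property names come from the template; the helper itself is illustrative only, not Falcon's actual builder):

import java.util.HashMap;
import java.util.Map;

public final class TemplateFillSketch {

    // Replace every ##key## occurrence with its value from the property map.
    static String fill(String template, Map<String, String> props) {
        String result = template;
        for (Map.Entry<String, String> entry : props.entrySet()) {
            result = result.replace("##" + entry.getKey() + "##", entry.getValue());
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<String, String>();
        props.put("jobName", "hive-mirroring-job");
        props.put("jobFrequency", "days(1)");
        props.put("jobTimezone", "UTC");
        // Prints the fragment with the three placeholders resolved.
        System.out.println(fill(
                "<process name=\"##jobName##\">"
                + "<frequency>##jobFrequency##</frequency>"
                + "<timezone>##jobTimezone##</timezone></process>", props));
    }
}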

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml
new file mode 100644
index 0000000..9f9bf92
--- /dev/null
+++ b/addons/extensions/hive-mirroring/src/main/resources/runtime/hive-mirroring-workflow.xml
@@ -0,0 +1,255 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
+    <start to='last-event'/>
+    <action name="last-event">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-tdeEncryptionEnabled</arg>
+            <arg>${tdeEncryptionEnabled}</arg>
+            <arg>-jobName</arg>
+            <arg>${jobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>lastevents</arg>
+        </java>
+        <ok to="export-dr-replication"/>
+        <error to="fail"/>
+    </action>
+    <!-- Export Replication action -->
+    <action name="export-dr-replication">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-replicationMaxMaps</arg>
+            <arg>${replicationMaxMaps}</arg>
+            <arg>-distcpMaxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-distcpMapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-tdeEncryptionEnabled</arg>
+            <arg>${tdeEncryptionEnabled}</arg>
+            <arg>-jobName</arg>
+            <arg>${jobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>export</arg>
+            <arg>-counterLogDir</arg>
+            <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}/</arg>
+        </java>
+        <ok to="import-dr-replication"/>
+        <error to="fail"/>
+    </action>
+    <!-- Import Replication action -->
+    <action name="import-dr-replication">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <configuration>
+                <property> <!-- hadoop 2 parameter -->
+                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>${queueName}</value>
+                </property>
+                <property>
+                    <name>oozie.launcher.mapred.job.priority</name>
+                    <value>${jobPriority}</value>
+                </property>
+                <property>
+                    <name>oozie.use.system.libpath</name>
+                    <value>true</value>
+                </property>
+                <property>
+                    <name>oozie.action.sharelib.for.java</name>
+                    <value>distcp,hive,hive2,hcatalog</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
+            <arg>-Dmapred.job.queue.name=${queueName}</arg>
+            <arg>-Dmapred.job.priority=${jobPriority}</arg>
+            <arg>-falconLibPath</arg>
+            <arg>${wf:conf("falcon.libpath")}</arg>
+            <arg>-replicationMaxMaps</arg>
+            <arg>${replicationMaxMaps}</arg>
+            <arg>-distcpMaxMaps</arg>
+            <arg>${distcpMaxMaps}</arg>
+            <arg>-sourceCluster</arg>
+            <arg>${sourceCluster}</arg>
+            <arg>-sourceMetastoreUri</arg>
+            <arg>${sourceMetastoreUri}</arg>
+            <arg>-sourceHiveServer2Uri</arg>
+            <arg>${sourceHiveServer2Uri}</arg>
+            <arg>-sourceDatabase</arg>
+            <arg>${sourceDatabase}</arg>
+            <arg>-sourceTable</arg>
+            <arg>${sourceTable}</arg>
+            <arg>-sourceStagingPath</arg>
+            <arg>${sourceStagingPath}</arg>
+            <arg>-sourceNN</arg>
+            <arg>${sourceNN}</arg>
+            <arg>-targetCluster</arg>
+            <arg>${targetCluster}</arg>
+            <arg>-targetMetastoreUri</arg>
+            <arg>${targetMetastoreUri}</arg>
+            <arg>-targetHiveServer2Uri</arg>
+            <arg>${targetHiveServer2Uri}</arg>
+            <arg>-targetStagingPath</arg>
+            <arg>${targetStagingPath}</arg>
+            <arg>-targetNN</arg>
+            <arg>${targetNN}</arg>
+            <arg>-maxEvents</arg>
+            <arg>${maxEvents}</arg>
+            <arg>-distcpMapBandwidth</arg>
+            <arg>${distcpMapBandwidth}</arg>
+            <arg>-clusterForJobRun</arg>
+            <arg>${clusterForJobRun}</arg>
+            <arg>-clusterForJobRunWriteEP</arg>
+            <arg>${clusterForJobRunWriteEP}</arg>
+            <arg>-tdeEncryptionEnabled</arg>
+            <arg>${tdeEncryptionEnabled}</arg>
+            <arg>-jobName</arg>
+            <arg>${jobName}-${nominalTime}</arg>
+            <arg>-executionStage</arg>
+            <arg>import</arg>
+        </java>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>
+            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
+        </message>
+    </kill>
+    <end name="end"/>
+</workflow-app>
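
The workflow above chains three invocations of HiveDRTool, one per -executionStage value (lastevents, export, import). A hypothetical sketch of how such a flag could drive the tool's dispatch; HiveDRTool's real control flow may differ:

import java.util.Locale;

public final class StageDispatchSketch {

    enum Stage { LASTEVENTS, EXPORT, IMPORT }

    // Map the -executionStage argument to one replication phase.
    static void run(String stageArg) {
        Stage stage = Stage.valueOf(stageArg.toUpperCase(Locale.ROOT));
        switch (stage) {
        case LASTEVENTS:
            // record the last event already replicated to the target
            break;
        case EXPORT:
            // dump new events on the source and copy them to the target staging path
            break;
        case IMPORT:
            // replay the staged events against the target metastore
            break;
        }
    }

    public static void main(String[] args) {
        run("export"); // corresponds to the export-dr-replication action above
    }
}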

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
index c9ad47e..71b9043 100644
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
+++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
@@ -32,7 +32,7 @@ public enum HiveDRArgs {
     SOURCE_HS2_URI("sourceHiveServer2Uri", "source HS2 uri"),
     SOURCE_DATABASE("sourceDatabase", "comma-separated source databases"),
     SOURCE_TABLE("sourceTable", "comma-separated source tables"),
-    SOURCE_STAGING_PATH("sourceStagingPath", "source staging path for data"),
+    SOURCE_STAGING_PATH("sourceStagingPath", "source staging path for data", false),
 
     // source hadoop endpoints
     SOURCE_NN("sourceNN", "source name node"),
@@ -47,7 +47,7 @@ public enum HiveDRArgs {
     TARGET_METASTORE_URI("targetMetastoreUri", "target metastore uri"),
     TARGET_HS2_URI("targetHiveServer2Uri", "target HS2 uri"),
 
-    TARGET_STAGING_PATH("targetStagingPath", "source staging path for data"),
+    TARGET_STAGING_PATH("targetStagingPath", "target staging path for data", false),
 
     // target hadoop endpoints
     TARGET_NN("targetNN", "target name node"),
@@ -70,16 +70,13 @@ public enum HiveDRArgs {
     // Map Bandwidth
     DISTCP_MAP_BANDWIDTH("distcpMapBandwidth", "map bandwidth in mb", false),
 
-    JOB_NAME("drJobName", "unique job name"),
+    JOB_NAME("jobName", "unique job name"),
 
     CLUSTER_FOR_JOB_RUN("clusterForJobRun", "cluster where job runs"),
     JOB_CLUSTER_NN("clusterForJobRunWriteEP", "write end point of cluster where job runs"),
     JOB_CLUSTER_NN_KERBEROS_PRINCIPAL("clusterForJobNNKerberosPrincipal",
             "Namenode kerberos principal of cluster on which replication job runs", false),
 
-
-    FALCON_LIBPATH("falconLibPath", "Falcon Lib Path for Jar files", false),
-
     KEEP_HISTORY("keepHistory", "Keep history of events file generated", false),
     EXECUTION_STAGE("executionStage", "Flag for workflow stage execution", false),
     COUNTER_LOGDIR("counterLogDir", "Log directory to store counter file", false);
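
For context, the pattern behind the third constructor argument added above: a false flag marks an option as optional so validation can skip it, while two-argument constants stay required by default. A minimal sketch of that pattern with commons-cli (the constant set is trimmed; the real HiveDRArgs carries more state):

import org.apache.commons.cli.Option;

public enum DrArgSketch {
    // required by default
    JOB_NAME("jobName", "unique job name"),
    // explicitly optional, matching the change above
    SOURCE_STAGING_PATH("sourceStagingPath", "source staging path for data", false);

    private final String name;
    private final String description;
    private final boolean isRequired;

    DrArgSketch(String name, String description) {
        this(name, description, true);
    }

    DrArgSketch(String name, String description, boolean isRequired) {
        this.name = name;
        this.description = description;
        this.isRequired = isRequired;
    }

    public boolean isRequired() {
        return isRequired;
    }

    // Build the commons-cli Option; optional args do not fail parsing when absent.
    public Option getOption() {
        Option option = new Option(name, true, description);
        option.setRequired(isRequired);
        return option;
    }
}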

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
index 868ec8d..0096727 100644
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
+++ b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
@@ -24,7 +24,7 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
+import org.apache.falcon.hive.util.FileUtils;
 
 import java.io.File;
 import java.util.Arrays;
@@ -70,11 +70,14 @@ public class HiveDROptions {
         return Arrays.asList(context.get(HiveDRArgs.SOURCE_TABLE).trim().split(","));
     }
 
-    public String getSourceStagingPath() throws HiveReplicationException {
-        if (StringUtils.isNotEmpty(context.get(HiveDRArgs.SOURCE_STAGING_PATH))) {
-            return context.get(HiveDRArgs.SOURCE_STAGING_PATH) + File.separator + getJobName();
+    public String getSourceStagingPath() {
+        String stagingPath = context.get(HiveDRArgs.SOURCE_STAGING_PATH);
+        if (StringUtils.isNotBlank(stagingPath)) {
+            stagingPath = StringUtils.removeEnd(stagingPath, File.separator);
+            return stagingPath + File.separator + getJobName();
+        } else {
+            return FileUtils.DEFAULT_EVENT_STORE_PATH + getJobName();
         }
-        throw new HiveReplicationException("Source StagingPath cannot be empty");
     }
 
     public String getSourceWriteEP() {
@@ -100,15 +103,19 @@ public class HiveDROptions {
     public String getTargetMetastoreKerberosPrincipal() {
         return context.get(HiveDRArgs.TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL);
     }
+
     public String getTargetHive2KerberosPrincipal() {
         return context.get(HiveDRArgs.TARGET_HIVE2_KERBEROS_PRINCIPAL);
     }
 
-    public String getTargetStagingPath() throws HiveReplicationException {
-        if (StringUtils.isNotEmpty(context.get(HiveDRArgs.TARGET_STAGING_PATH))) {
-            return context.get(HiveDRArgs.TARGET_STAGING_PATH) + File.separator + getJobName();
+    public String getTargetStagingPath() {
+        String stagingPath = context.get(HiveDRArgs.TARGET_STAGING_PATH);
+        if (StringUtils.isNotBlank(stagingPath)) {
+            stagingPath = StringUtils.removeEnd(stagingPath, File.separator);
+            return stagingPath + File.separator + getJobName();
+        } else {
+            return FileUtils.DEFAULT_EVENT_STORE_PATH + getJobName();
         }
-        throw new HiveReplicationException("Target StagingPath cannot be empty");
     }
 
     public String getReplicationMaxMaps() {
@@ -135,23 +142,10 @@ public class HiveDROptions {
         return context.get(HiveDRArgs.JOB_CLUSTER_NN_KERBEROS_PRINCIPAL);
     }
 
-    public void setSourceStagingDir(String path) {
-        context.put(HiveDRArgs.SOURCE_STAGING_PATH, path);
-    }
-
-    public void setTargetStagingDir(String path) {
-        context.put(HiveDRArgs.TARGET_STAGING_PATH, path);
-    }
-
     public String getExecutionStage() {
         return context.get(HiveDRArgs.EXECUTION_STAGE);
     }
 
-    public boolean isTDEEncryptionEnabled() {
-        return StringUtils.isEmpty(context.get(HiveDRArgs.TDE_ENCRYPTION_ENABLED))
-                ? false : Boolean.valueOf(context.get(HiveDRArgs.TDE_ENCRYPTION_ENABLED));
-    }
-
     public boolean shouldBlock() {
         return true;
     }
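
The new getSourceStagingPath/getTargetStagingPath logic above falls back to a default event-store location instead of throwing when the staging path is blank. A small sketch of that resolution (the default path value here is hypothetical; the real one comes from FileUtils.DEFAULT_EVENT_STORE_PATH):

import java.io.File;
import org.apache.commons.lang3.StringUtils;

public final class StagingPathSketch {

    // Hypothetical stand-in for FileUtils.DEFAULT_EVENT_STORE_PATH
    // (assumed to end with a separator, as the concatenation above implies).
    private static final String DEFAULT_EVENT_STORE_PATH = "/apps/falcon/extensions/mirroring/";

    static String resolve(String stagingPath, String jobName) {
        if (StringUtils.isNotBlank(stagingPath)) {
            // removeEnd prevents a double separator when the caller passes "/tmp/staging/"
            return StringUtils.removeEnd(stagingPath, File.separator) + File.separator + jobName;
        }
        return DEFAULT_EVENT_STORE_PATH + jobName;
    }

    public static void main(String[] args) {
        System.out.println(resolve("/tmp/staging/", "hive-dr")); // /tmp/staging/hive-dr
        System.out.println(resolve(null, "hive-dr"));            // default event store + hive-dr
    }
}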

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hdfs-replication/README.txt
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/README.txt b/addons/recipes/hdfs-replication/README.txt
deleted file mode 100644
index 5742d43..0000000
--- a/addons/recipes/hdfs-replication/README.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDFS Directory Replication Recipe
-
-Overview
-This recipe replicates arbitrary HDFS directories from one Hadoop cluster
-to another. It piggybacks on Falcon's replication solution, which uses the
-DistCp tool.
-
-Use Case
-* Copy directories with out-of-date partitions between HDFS clusters
-* Archive directories from HDFS to cloud storage, e.g. S3 or Azure WASB
-
-Limitations
-As the data volume and number of files grow, this can get inefficient.

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hdfs-replication/pom.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/pom.xml b/addons/recipes/hdfs-replication/pom.xml
deleted file mode 100644
index 98d9795..0000000
--- a/addons/recipes/hdfs-replication/pom.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.apache.falcon.recipes</groupId>
-    <artifactId>falcon-hdfs-replication-recipe</artifactId>
-    <version>0.10-SNAPSHOT</version>
-    <description>Apache Falcon Sample Hdfs Replication Recipe</description>
-    <name>Apache Falcon Sample Hdfs Replication Recipe</name>
-    <packaging>jar</packaging>
-</project>

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml
deleted file mode 100644
index 441a189..0000000
--- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-
-<process name="##falcon.recipe.job.name##" xmlns="uri:falcon:process:0.1">
-    <clusters>
-        <!--  source  -->
-        <cluster name="##falcon.recipe.cluster.name##">
-            <validity end="##falcon.recipe.cluster.validity.end##" start="##falcon.recipe.cluster.validity.start##"/>
-        </cluster>
-    </clusters>
-
-    <tags>_falcon_mirroring_type=HDFS</tags>
-
-    <parallel>1</parallel>
-    <!-- Dir replication needs to run only once to catch up -->
-    <order>LAST_ONLY</order>
-    <frequency>##falcon.recipe.frequency##</frequency>
-    <timezone>UTC</timezone>
-
-    <properties>
-        <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
-    </properties>
-
-    <workflow name="##falcon.recipe.workflow.name##" engine="oozie" path="/apps/data-mirroring/workflows/hdfs-replication-workflow.xml" lib="##workflow.lib.path##"/>
-    <retry policy="##falcon.recipe.retry.policy##" delay="##falcon.recipe.retry.delay##" attempts="3"/>
-    <notification type="##falcon.recipe.notification.type##" to="##falcon.recipe.notification.receivers##"/>
-    <ACL/>
-</process>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/95bf312f/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml
deleted file mode 100644
index c1966be..0000000
--- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-  -->
-<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-fs-workflow'>
-    <start to='dr-replication'/>
-    <!-- Replication action -->
-    <action name="dr-replication">
-        <java>
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <configuration>
-                <property> <!-- hadoop 2 parameter -->
-                    <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>mapred.job.queue.name</name>
-                    <value>${queueName}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapred.job.priority</name>
-                    <value>${jobPriority}</value>
-                </property>
-                <property>
-                    <name>oozie.use.system.libpath</name>
-                    <value>true</value>
-                </property>
-                <property>
-                    <name>oozie.action.sharelib.for.java</name>
-                    <value>distcp</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.oozie.libpath</name>
-                    <value>${wf:conf("falcon.libpath")}</value>
-                </property>
-                <property>
-                    <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
-                    <value>${drSourceClusterFS},${drTargetClusterFS}</value>
-                </property>
-            </configuration>
-            <main-class>org.apache.falcon.replication.FeedReplicator</main-class>
-            <arg>-Dmapred.job.queue.name=${queueName}</arg>
-            <arg>-Dmapred.job.priority=${jobPriority}</arg>
-            <arg>-maxMaps</arg>
-            <arg>${distcpMaxMaps}</arg>
-            <arg>-mapBandwidth</arg>
-            <arg>${distcpMapBandwidth}</arg>
-            <arg>-sourcePaths</arg>
-            <arg>${drSourceDir}</arg>
-            <arg>-targetPath</arg>
-            <arg>${drTargetClusterFS}${drTargetDir}</arg>
-            <arg>-falconFeedStorageType</arg>
-            <arg>FILESYSTEM</arg>
-            <arg>-availabilityFlag</arg>
-            <arg>${availabilityFlag == 'NA' ? "NA" : availabilityFlag}</arg>
-            <arg>-counterLogDir</arg>
-            <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}</arg>
-        </java>
-        <ok to="end"/>
-        <error to="fail"/>
-    </action>
-    <kill name="fail">
-        <message>
-            Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
-        </message>
-    </kill>
-    <end name="end"/>
-</workflow-app>