Posted to commits@falcon.apache.org by ba...@apache.org on 2016/08/16 22:48:38 UTC

[4/5] falcon git commit: Update falcon branch 0.10-refactored-ui to be up to date with branch 0.10

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java b/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
index 7f2b172..52feab7 100644
--- a/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
+++ b/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
@@ -69,6 +69,10 @@ public final class ConfigurationStore implements FalconService {
     private static final Logger LOG = LoggerFactory.getLogger(ConfigurationStore.class);
     private static final Logger AUDIT = LoggerFactory.getLogger("AUDIT");
     private static final String UTF_8 = CharEncoding.UTF_8;
+    private static final String LOAD_ENTITIES_THREADS = "config.store.num.threads.load.entities";
+    private static final String TIMEOUT_MINS_LOAD_ENTITIES = "config.store.start.timeout.minutes";
+    private int numThreads;
+    private int restoreTimeOutInMins;
     private final boolean shouldPersist;
 
     private static final FsPermission STORE_PERMISSION =
@@ -149,6 +153,21 @@ public final class ConfigurationStore implements FalconService {
 
     @Override
     public void init() throws FalconException {
+        try {
+            numThreads = Integer.parseInt(StartupProperties.get().getProperty(LOAD_ENTITIES_THREADS, "100"));
+            LOG.info("Number of threads used to restore entities: {}", restoreTimeOutInMins);
+        } catch (NumberFormatException nfe) {
+            throw new FalconException("Invalid value specified for start up property \""
+                    + LOAD_ENTITIES_THREADS + "\". Please provide an integer value");
+        }
+        try {
+            restoreTimeOutInMins = Integer.parseInt(StartupProperties.get().
+                    getProperty(TIMEOUT_MINS_LOAD_ENTITIES, "30"));
+            LOG.info("TimeOut to load Entities is taken as {} mins", restoreTimeOutInMins);
+        } catch (NumberFormatException nfe) {
+            throw new FalconException("Invalid value specified for start up property \""
+                    + TIMEOUT_MINS_LOAD_ENTITIES + "\". Please provide an integer value");
+        }
         String listenerClassNames = StartupProperties.get().
                 getProperty("configstore.listeners", "org.apache.falcon.entity.v0.EntityGraph");
         for (String listenerClassName : listenerClassNames.split(",")) {
@@ -172,7 +191,8 @@ public final class ConfigurationStore implements FalconService {
             final ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
             FileStatus[] files = fs.globStatus(new Path(storePath, type.name() + Path.SEPARATOR + "*"));
             if (files != null) {
-                final ExecutorService service = Executors.newFixedThreadPool(100);
+
+                final ExecutorService service = Executors.newFixedThreadPool(numThreads);
                 for (final FileStatus file : files) {
                     service.execute(new Runnable() {
                         @Override
@@ -183,6 +203,7 @@ public final class ConfigurationStore implements FalconService {
                                 // ".xml"
                                 String entityName = URLDecoder.decode(encodedEntityName, UTF_8);
                                 Entity entity = restore(type, entityName);
+                                LOG.info("Restored configuration {}/{}", type, entityName);
                                 entityMap.put(entityName, entity);
                             } catch (IOException | FalconException e) {
                                 LOG.error("Unable to restore entity of", file);
@@ -191,10 +212,10 @@ public final class ConfigurationStore implements FalconService {
                     });
                 }
                 service.shutdown();
-                if (service.awaitTermination(10, TimeUnit.MINUTES)) {
+                if (service.awaitTermination(restoreTimeOutInMins, TimeUnit.MINUTES)) {
                     LOG.info("Restored Configurations for entity type: {} ", type.name());
                 } else {
-                    LOG.warn("Time out happened while waiting for all threads to finish while restoring entities "
+                    LOG.warn("Timed out waiting for all threads to finish while restoring entities "
                             + "for type: {}", type.name());
                 }
                 // Checking if all entities were loaded
@@ -336,6 +357,7 @@ public final class ConfigurationStore implements FalconService {
                 } catch (IOException e) {
                     throw new StoreAccessException(e);
                 }
+                LOG.info("Restored configuration {}/{}", type, name);
                 entityMap.put(name, entity);
                 return entity;
             } else {
@@ -445,7 +467,6 @@ public final class ConfigurationStore implements FalconService {
             throw new StoreAccessException("Unable to un-marshall xml definition for " + type + "/" + name, e);
         } finally {
             in.close();
-            LOG.info("Restored configuration {}/{}", type, name);
         }
     }
 

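For reference, a minimal, self-contained sketch of the restore pattern the hunks above introduce: thread-pool size and wait time come from the two new startup properties instead of hard-coded values. The property names are taken from the diff; the placeholder task body and the standalone class are illustrative assumptions, not Falcon's actual restore logic.

    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public final class RestorePoolSketch {
        public static void main(String[] args) throws InterruptedException {
            // Stand-in for StartupProperties.get(); the two keys are the ones added above.
            Properties startup = new Properties();
            int numThreads = Integer.parseInt(
                    startup.getProperty("config.store.num.threads.load.entities", "100"));
            int restoreTimeOutInMins = Integer.parseInt(
                    startup.getProperty("config.store.start.timeout.minutes", "30"));

            ExecutorService service = Executors.newFixedThreadPool(numThreads);
            for (int i = 0; i < 10; i++) {
                final int id = i;
                service.execute(new Runnable() {
                    @Override
                    public void run() {
                        System.out.println("restoring entity " + id);   // placeholder work
                    }
                });
            }
            service.shutdown();
            // Wait for the configured timeout instead of the previously hard-coded 10 minutes.
            if (!service.awaitTermination(restoreTimeOutInMins, TimeUnit.MINUTES)) {
                System.err.println("Timed out waiting for entity restore to finish");
            }
        }
    }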
http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java b/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
index e4d9385..acb570e 100644
--- a/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
+++ b/common/src/main/java/org/apache/falcon/entity/v0/EntityGraph.java
@@ -200,6 +200,16 @@ public final class EntityGraph implements ConfigurationChangeListener {
                 feedEdges.add(dbNode);
                 dbEdges.add(feedNode);
             }
+
+            if (FeedHelper.isExportEnabled(cluster)) {
+                Node dbNode = new Node(EntityType.DATASOURCE, FeedHelper.getExportDatasourceName(cluster));
+                if (!nodeEdges.containsKey(dbNode)) {
+                    nodeEdges.put(dbNode, new HashSet<Node>());
+                }
+                Set<Node> dbEdges = nodeEdges.get(dbNode);
+                feedEdges.add(dbNode);
+                dbEdges.add(feedNode);
+            }
         }
         return nodeEdges;
     }

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java b/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
index d70c4b9..e30f51e 100644
--- a/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
+++ b/common/src/main/java/org/apache/falcon/hadoop/HadoopClientFactory.java
@@ -262,7 +262,9 @@ public final class HadoopClientFactory {
 
         try {
             if (UserGroupInformation.isSecurityEnabled()) {
-                ugi.checkTGTAndReloginFromKeytab();
+                LOG.debug("Revalidating Auth Token with auth method {}",
+                        UserGroupInformation.getLoginUser().getAuthenticationMethod().name());
+                UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
             }
         } catch (IOException ioe) {
             throw new FalconException("Exception while getting FileSystem. Unable to check TGT for user "

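The change above re-checks the TGT on the login user rather than the proxy UGI. A minimal sketch of that call pattern, assuming a keytab-based login has already been performed elsewhere:

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class TgtReloginSketch {
        private TgtReloginSketch() {}

        public static void relogin() throws IOException {
            if (UserGroupInformation.isSecurityEnabled()) {
                UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
                // Re-login from the keytab only if the TGT is close to expiry; otherwise a no-op.
                loginUser.checkTGTAndReloginFromKeytab();
            }
        }
    }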
http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java b/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java
new file mode 100644
index 0000000..a256e46
--- /dev/null
+++ b/common/src/main/java/org/apache/falcon/metadata/GraphUpdateUtils.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.metadata;
+
+import com.tinkerpop.blueprints.Graph;
+import com.tinkerpop.blueprints.util.io.graphson.GraphSONReader;
+import com.tinkerpop.blueprints.util.io.graphson.GraphSONWriter;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.io.FileUtils;
+import org.apache.falcon.FalconException;
+
+import java.io.File;
+
+/**
+ * Utility class for graph operations.
+ */
+public final class GraphUpdateUtils {
+
+    private static final String BANNER_MSG =
+            "Before running this utility please make sure that Falcon startup properties "
+                    + "have the right configuration settings for the graph database, "
+                    + "Falcon server is stopped and no other access to the graph database is being performed.";
+
+    private static final String IMPORT = "import";
+    private static final String EXPORT = "export";
+    private static final String INSTANCE_JSON_FILE = "instanceMetadata.json";
+
+    private GraphUpdateUtils() {
+    }
+
+    public static void main(String[] args) {
+        if (args.length != 2) {
+            usage();
+            System.exit(1);
+        }
+        System.out.println(BANNER_MSG);
+        String operation = args[0].toLowerCase();
+        if (!(operation.equals(EXPORT) || operation.equals(IMPORT))) {
+            usage();
+            System.exit(1);
+        }
+        String utilsDir = args[1];
+        File utilsDirFile = new File(utilsDir);
+        if (!utilsDirFile.isDirectory()) {
+            System.err.println(utilsDir + " is not a valid directory");
+            System.exit(1);
+        }
+        String jsonFile = new File(utilsDirFile, INSTANCE_JSON_FILE).getAbsolutePath();
+        try {
+            Graph graph;
+            if (operation.equals(EXPORT)) {
+                graph = MetadataMappingService.initializeGraphDB();
+                GraphSONWriter.outputGraph(graph, jsonFile);
+                System.out.println("Exported instance metadata to " + jsonFile);
+            } else {
+                // Backup existing graphDB dir
+                Configuration graphConfig = MetadataMappingService.getConfiguration();
+                String graphStore = (String) graphConfig.getProperty("storage.directory");
+                File graphStoreFile = new File(graphStore);
+                File graphDirBackup = new File(graphStore + "_backup");
+                if (graphDirBackup.exists()) {
+                    FileUtils.deleteDirectory(graphDirBackup);
+                }
+                FileUtils.copyDirectory(graphStoreFile, graphDirBackup);
+
+                // delete graph dir first and then init graphDB to ensure IMPORT happens into empty DB.
+                FileUtils.deleteDirectory(graphStoreFile);
+                graph = MetadataMappingService.initializeGraphDB();
+
+                // Import, if there is an exception restore backup.
+                try {
+                    GraphSONReader.inputGraph(graph, jsonFile);
+                    System.out.println("Imported instance metadata from " + jsonFile);
+                } catch (Exception ex) {
+                    String errorMsg = ex.getMessage();
+                    if (graphStoreFile.exists()) {
+                        FileUtils.deleteDirectory(graphStoreFile);
+                    }
+                    FileUtils.copyDirectory(graphDirBackup, graphStoreFile);
+                    throw new FalconException(errorMsg);
+                }
+            }
+        } catch (Exception e) {
+            System.err.println("Error " + operation + "ing JSON data to " + jsonFile + ", " + e.getMessage());
+            e.printStackTrace(System.out);
+            System.exit(1);
+        }
+        System.exit(0);
+    }
+
+    public static void usage() {
+        StringBuilder usageMessage = new StringBuilder(1024);
+        usageMessage.append("usage: java ").append(GraphUpdateUtils.class.getName())
+                .append(" {").append(EXPORT).append('|').append(IMPORT).append("} <directory>");
+        System.err.println(usageMessage);
+    }
+}

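GraphUpdateUtils above exports and imports the metadata graph as GraphSON. A minimal round-trip sketch of those two Blueprints calls, using an in-memory TinkerGraph instead of the configured Titan store (the file name and the vertex property are illustrative):

    import com.tinkerpop.blueprints.Graph;
    import com.tinkerpop.blueprints.Vertex;
    import com.tinkerpop.blueprints.impls.tg.TinkerGraph;
    import com.tinkerpop.blueprints.util.io.graphson.GraphSONReader;
    import com.tinkerpop.blueprints.util.io.graphson.GraphSONWriter;

    public final class GraphSonRoundTrip {
        public static void main(String[] args) throws Exception {
            Graph source = new TinkerGraph();
            Vertex v = source.addVertex(null);
            v.setProperty("name", "sample-feed");

            // Export, as the "export" operation does for the configured graph database.
            String jsonFile = "instanceMetadata.json";
            GraphSONWriter.outputGraph(source, jsonFile);

            // Import into an empty graph; the utility empties (and backs up) the store first.
            Graph target = new TinkerGraph();
            GraphSONReader.inputGraph(target, jsonFile);
            System.out.println("Imported vertices: " + target.getVertices());
        }
    }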
http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java b/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
index 66a3a58..225e44a 100644
--- a/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
+++ b/common/src/main/java/org/apache/falcon/metadata/MetadataMappingService.java
@@ -35,6 +35,7 @@ import com.tinkerpop.blueprints.util.TransactionWork;
 import org.apache.commons.configuration.BaseConfiguration;
 import org.apache.commons.configuration.Configuration;
 import org.apache.falcon.FalconException;
+import org.apache.falcon.FalconRuntimException;
 import org.apache.falcon.entity.store.ConfigurationStore;
 import org.apache.falcon.entity.v0.Entity;
 import org.apache.falcon.entity.v0.EntityType;
@@ -48,6 +49,9 @@ import org.slf4j.LoggerFactory;
 import org.apache.falcon.workflow.WorkflowExecutionContext;
 import org.apache.falcon.workflow.WorkflowExecutionListener;
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
@@ -69,7 +73,26 @@ public class MetadataMappingService
      * Constant for the configuration property that indicates the prefix.
      */
     private static final String FALCON_PREFIX = "falcon.graph.";
-
+    /**
+     * Constant for the configuration property that indicates the storage backend.
+     */
+    public static final String PROPERTY_KEY_STORAGE_BACKEND = "storage.backend";
+    public static final String STORAGE_BACKEND_HBASE = "hbase";
+    public static final String STORAGE_BACKEND_BDB = "berkeleyje";
+    /**
+     * HBase configuration properties.
+     */
+    public static final String PROPERTY_KEY_STORAGE_HOSTNAME = "storage.hostname";
+    public static final String PROPERTY_KEY_STORAGE_TABLE = "storage.hbase.table";
+    public static final Set<String> PROPERTY_KEYS_HBASE = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
+            PROPERTY_KEY_STORAGE_HOSTNAME, PROPERTY_KEY_STORAGE_TABLE)));
+    /**
+     * Berkeley DB configuration properties.
+     */
+    public static final String PROPERTY_KEY_STORAGE_DIRECTORY = "storage.directory";
+    public static final String PROPERTY_KEY_SERIALIZE_PATH = "serialize.path";
+    public static final Set<String> PROPERTY_KEYS_BDB = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
+            PROPERTY_KEY_STORAGE_DIRECTORY, PROPERTY_KEY_SERIALIZE_PATH)));
 
     private Graph graph;
     private Set<String> vertexIndexedKeys;
@@ -116,13 +139,58 @@ public class MetadataMappingService
         }
     }
 
-    protected Graph initializeGraphDB() {
+    public static Graph initializeGraphDB() {
         LOG.info("Initializing graph db");
-
         Configuration graphConfig = getConfiguration();
+        validateConfiguration(graphConfig);
         return GraphFactory.open(graphConfig);
     }
 
+    private static void validateConfiguration(Configuration graphConfig) {
+        // check if storage backend if configured
+        if (!graphConfig.containsKey(PROPERTY_KEY_STORAGE_BACKEND)) {
+            throw new FalconRuntimException("Titan GraphDB storage backend is not configured. "
+                    + "You need to choose either hbase or berkeleydb. "
+                    + "Please check Configuration twiki or "
+                    + "the section Graph Database Properties in startup.properties "
+                    + "on how to configure Titan GraphDB backend.");
+        }
+
+        String backend = graphConfig.getString(PROPERTY_KEY_STORAGE_BACKEND);
+        switch (backend) {
+        case STORAGE_BACKEND_BDB:
+            // check required parameter for Berkeley DB backend
+            for (String key : PROPERTY_KEYS_BDB) {
+                if (!graphConfig.containsKey(key)) {
+                    throw new FalconRuntimException("Required parameter " + FALCON_PREFIX + key
+                            + " not found in startup.properties. "
+                            + "Please check Configuration twiki or "
+                            + "the section Graph Database Properties in startup.properties "
+                            + "on how to configure Berkeley DB storage backend.");
+                }
+            }
+            break;
+        case STORAGE_BACKEND_HBASE:
+            // check required parameter for HBase backend
+            for (String key : PROPERTY_KEYS_HBASE) {
+                if (!graphConfig.containsKey(key)) {
+                    throw new FalconRuntimException("Required parameter " + FALCON_PREFIX + key
+                            + " not found in startup.properties. "
+                            + "Please check Configuration twiki or "
+                            + "the section Graph Database Properties in startup.properties "
+                            + "on how to configure HBase storage backend.");
+                }
+            }
+            break;
+        default:
+            throw new FalconRuntimException("Invalid graph storage backend: " + backend + ". "
+                    + "You need to choose either hbase or berkeleydb. "
+                    + "Please check Configuration twiki or "
+                    + "the section Graph Database Properties in startup.properties "
+                    + "on how to configure Titan GraphDB backend.");
+        }
+    }
+
     public static Configuration getConfiguration() {
         Configuration graphConfig = new BaseConfiguration();
 

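The validation added above only checks that the backend-specific keys are present. A small worked example of a configuration that would pass the BerkeleyDB checks; the paths and the standalone class are illustrative assumptions, not the actual getConfiguration() implementation:

    import org.apache.commons.configuration.BaseConfiguration;
    import org.apache.commons.configuration.Configuration;

    public final class GraphConfigSketch {
        public static void main(String[] args) {
            Configuration graphConfig = new BaseConfiguration();
            graphConfig.setProperty("storage.backend", "berkeleyje");
            graphConfig.setProperty("storage.directory", "/var/lib/falcon/data/graphdb");
            graphConfig.setProperty("serialize.path", "/var/lib/falcon/data/graphdb");

            // Same shape as the checks above: every backend-specific key must be present.
            for (String key : new String[]{"storage.directory", "serialize.path"}) {
                if (!graphConfig.containsKey(key)) {
                    throw new IllegalStateException("Required parameter falcon.graph." + key
                            + " not found in startup.properties");
                }
            }
            System.out.println("Backend: " + graphConfig.getString("storage.backend"));
        }
    }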
http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java b/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
index f7b2155..31be07a 100644
--- a/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
+++ b/common/src/main/java/org/apache/falcon/security/AuthenticationInitializationService.java
@@ -67,6 +67,7 @@ public class AuthenticationInitializationService implements FalconService {
 
     private Timer timer = new Timer();
     private static final String SERVICE_NAME = "Authentication initialization service";
+    private static final long DEFAULT_VALIDATE_FREQUENCY_SECS = 86300;
 
     @Override
     public String getName() {
@@ -83,8 +84,13 @@ public class AuthenticationInitializationService implements FalconService {
             String authTokenValidity = StartupProperties.get().getProperty(AUTH_TOKEN_VALIDITY_SECONDS);
             long validateFrequency;
             try {
+                // -100 so that revalidation is done before expiry.
                 validateFrequency = (StringUtils.isNotEmpty(authTokenValidity))
-                        ? Long.parseLong(authTokenValidity) : 86400;
+                        ? (Long.parseLong(authTokenValidity) - 100) : DEFAULT_VALIDATE_FREQUENCY_SECS;
+                if (validateFrequency < 0) {
+                    throw new NumberFormatException("Value provided for startup property \""
+                            + AUTH_TOKEN_VALIDITY_SECONDS + "\" should be greater than 100.");
+                }
             } catch (NumberFormatException nfe) {
                 throw new FalconException("Invalid value provided for startup property \""
                         + AUTH_TOKEN_VALIDITY_SECONDS + "\", please provide a valid long number", nfe);
@@ -149,12 +155,12 @@ public class AuthenticationInitializationService implements FalconService {
         @Override
         public void run() {
             try {
-                LOG.info("Validating Auth Token: {}", new Date());
-                initializeKerberos();
+                LOG.debug("Revalidating Auth Token at {} with auth method {}", new Date(),
+                        UserGroupInformation.getLoginUser().getAuthenticationMethod().name());
+                UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
             } catch (Throwable t) {
-                LOG.error("Error in Auth Token Validation task: ", t);
-                GenericAlert.initializeKerberosFailed(
-                        "Exception in Auth Token Validation : ", t);
+                LOG.error("Error in Auth Token revalidation task: ", t);
+                GenericAlert.initializeKerberosFailed("Exception in Auth Token revalidation : ", t);
             }
         }
     }

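The change above derives the revalidation interval from the configured token validity, subtracting 100 seconds so the relogin runs before the token expires (hence the 86300-second default, i.e. 86400 - 100). A worked sketch of that arithmetic; the literal value and the standalone class are assumptions for illustration:

    public final class ValidateFrequencySketch {
        public static void main(String[] args) {
            String authTokenValidity = "86400";   // configured token validity in seconds (example)
            long validateFrequency = (authTokenValidity != null && !authTokenValidity.isEmpty())
                    ? Long.parseLong(authTokenValidity) - 100   // revalidate 100s before expiry
                    : 86300L;                                   // default: 86400 - 100
            if (validateFrequency < 0) {
                throw new NumberFormatException("Token validity should be greater than 100 seconds");
            }
            System.out.println("Revalidate every " + validateFrequency + " seconds");
        }
    }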
http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java b/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
index 5eb6a25..a4410af 100644
--- a/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
+++ b/common/src/main/java/org/apache/falcon/security/DefaultAuthorizationProvider.java
@@ -59,6 +59,7 @@ public class DefaultAuthorizationProvider implements AuthorizationProvider {
 
     private static final Set<String> RESOURCES = new HashSet<String>(
             Arrays.asList(new String[]{"admin", "entities", "instance", "metadata", "extension", }));
+    private static final String LIST_OPERATION = "list";
 
     /**
      * Constant for the configuration property that indicates the prefix.
@@ -170,7 +171,11 @@ public class DefaultAuthorizationProvider implements AuthorizationProvider {
                     authorizeAdminResource(authenticatedUGI, action);
                 }
             } else if ("entities".equals(resource) || "instance".equals(resource)) {
-                authorizeEntityResource(authenticatedUGI, entityName, entityType, action);
+                if ("entities".equals(resource) && LIST_OPERATION.equals(action)) {
+                    LOG.info("Skipping authorization for entity list operations");
+                } else {
+                    authorizeEntityResource(authenticatedUGI, entityName, entityType, action);
+                }
             } else if ("metadata".equals(resource)) {
                 authorizeMetadataResource(authenticatedUGI, action);
             }
@@ -296,7 +301,6 @@ public class DefaultAuthorizationProvider implements AuthorizationProvider {
                                            String entityName, String entityType,
                                            String action)
         throws AuthorizationException, EntityNotRegisteredException {
-
         Validate.notEmpty(entityType, "Entity type cannot be empty or null");
         LOG.debug("Authorizing authenticatedUser={} against entity/instance action={}, "
                 + "entity name={}, entity type={}",

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
index 9b1e1f4..9b011b8 100644
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
+++ b/common/src/main/java/org/apache/falcon/workflow/WorkflowExecutionContext.java
@@ -304,11 +304,11 @@ public class WorkflowExecutionContext {
     }
 
     public int getWorkflowRunId() {
-        return Integer.parseInt(getValue(WorkflowExecutionArgs.RUN_ID));
+        return Integer.parseInt(getValue(WorkflowExecutionArgs.RUN_ID, "0"));
     }
 
     public String getWorkflowRunIdString() {
-        return String.valueOf(Integer.parseInt(getValue(WorkflowExecutionArgs.RUN_ID)));
+        return String.valueOf(Integer.parseInt(getValue(WorkflowExecutionArgs.RUN_ID, "0")));
     }
 
     public String getWorkflowUser() {

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java b/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
index b692258..6d1332e 100644
--- a/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
+++ b/common/src/main/java/org/apache/falcon/workflow/WorkflowJobEndNotificationService.java
@@ -155,6 +155,7 @@ public class WorkflowJobEndNotificationService implements FalconService {
     private boolean updateContextFromWFConf(WorkflowExecutionContext context) throws FalconException {
         Properties wfProps = contextMap.get(context.getWorkflowId());
         if (wfProps == null) {
+            wfProps = new Properties();
             Entity entity = null;
             try {
                 entity = EntityUtil.getEntity(context.getEntityType(), context.getEntityName());
@@ -166,11 +167,12 @@ public class WorkflowJobEndNotificationService implements FalconService {
                 return false;
             }
             for (String cluster : EntityUtil.getClustersDefinedInColos(entity)) {
+                wfProps.setProperty(WorkflowExecutionArgs.CLUSTER_NAME.getName(), cluster);
                 try {
                     InstancesResult.Instance[] instances = WorkflowEngineFactory.getWorkflowEngine(entity)
                             .getJobDetails(cluster, context.getWorkflowId()).getInstances();
                     if (instances != null && instances.length > 0) {
-                        wfProps = getWFProps(instances[0].getWfParams());
+                        wfProps.putAll(getWFProps(instances[0].getWfParams()));
                         // Required by RetryService. But, is not part of conf.
                         wfProps.setProperty(WorkflowExecutionArgs.RUN_ID.getName(),
                                 Integer.toString(instances[0].getRunId()));
@@ -299,7 +301,7 @@ public class WorkflowJobEndNotificationService implements FalconService {
         }
         Long duration = (endTime.getTime() - startTime.getTime()) * 1000000;
 
-        if (context.hasWorkflowFailed()) {
+        if (!context.hasWorkflowSucceeded()) {
             GenericAlert.instrumentFailedInstance(clusterName, entityType,
                     entityName, nominalTime, workflowId, workflowUser, runId, operation,
                     SchemaHelper.formatDateUTC(startTime), "", "", duration);

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/main/resources/startup.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/startup.properties b/common/src/main/resources/startup.properties
index 2229edf..0990035 100644
--- a/common/src/main/resources/startup.properties
+++ b/common/src/main/resources/startup.properties
@@ -34,6 +34,7 @@
 *.application.services=org.apache.falcon.security.AuthenticationInitializationService,\
                         org.apache.falcon.workflow.WorkflowJobEndNotificationService, \
                         org.apache.falcon.service.ProcessSubscriberService,\
+                        org.apache.falcon.extensions.ExtensionService,\
                         org.apache.falcon.service.FeedSLAMonitoringService,\
                         org.apache.falcon.service.LifecyclePolicyMap,\
                         org.apache.falcon.entity.store.ConfigurationStore,\
@@ -43,8 +44,7 @@
                         org.apache.falcon.service.LogCleanupService,\
                         org.apache.falcon.service.GroupsService,\
                         org.apache.falcon.service.ProxyUserService,\
-                        org.apache.falcon.service.FalconJPAService,\
-                        org.apache.falcon.extensions.ExtensionService
+                        org.apache.falcon.service.FalconJPAService
 ##Add if you want to send data to graphite
 #                        org.apache.falcon.metrics.MetricNotificationService\
 ## Add if you want to use Falcon Azure integration ##
@@ -156,9 +156,20 @@ it.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandle
 *.falcon.graph.blueprints.graph=com.thinkaurelius.titan.core.TitanFactory
 
 # Graph Storage
-*.falcon.graph.storage.directory=${user.dir}/target/graphdb
-*.falcon.graph.storage.backend=berkeleyje
-*.falcon.graph.serialize.path=${user.dir}/target/graphdb
+# IMPORTANT:   Please enable one of the graph db backends, hbase or berkeleydb, per the instructions below.
+
+# Enable the following for Berkeley DB.  Make sure je-5.0.73.jar is downloaded and available
+# under Falcon webapp directory or under falcon server classpath.
+#*.falcon.graph.storage.backend=berkeleyje
+#*.falcon.graph.storage.directory=/${falcon.home}/data/graphdb
+#*.falcon.graph.serialize.path=${user.dir}/target/graphdb
+
+# Enable the following for HBase
+#*.falcon.graph.storage.backend=hbase
+# For standalone mode, set hostname to localhost; for distributed mode, set it to the zookeeper quorum
+# @see http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2
+#*.falcon.graph.storage.hostname=localhost
+#*.falcon.graph.storage.hbase.table=falcon_titan
 
 # Avoid acquiring read lock when iterating over large graphs
 # See http://s3.thinkaurelius.com/docs/titan/0.5.4/bdb.html

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java b/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
index c642fb8..f9aad19 100644
--- a/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
+++ b/common/src/test/java/org/apache/falcon/entity/parser/FeedEntityParserTest.java
@@ -977,7 +977,8 @@ public class FeedEntityParserTest extends AbstractTestBase {
         }
     }
 
-    @Test
+    // Disabled: this test validates a dummy s3 url, which is no longer supported by recent hdfs (2.7.2 or above).
+    @Test (enabled = false)
     public void testValidateACLForArchiveReplication() throws Exception {
         StartupProperties.get().setProperty("falcon.security.authorization.enabled", "true");
         Assert.assertTrue(Boolean.valueOf(

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java b/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
index 23f69d7..b41cc03 100644
--- a/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
+++ b/common/src/test/java/org/apache/falcon/entity/v0/EntityGraphTest.java
@@ -20,17 +20,22 @@ package org.apache.falcon.entity.v0;
 
 import org.apache.falcon.entity.AbstractTestBase;
 import org.apache.falcon.entity.store.ConfigurationStore;
+import org.apache.falcon.entity.v0.feed.Feed;
+import org.apache.falcon.entity.v0.feed.Load;
 import org.apache.falcon.entity.v0.feed.Argument;
 import org.apache.falcon.entity.v0.feed.Arguments;
 import org.apache.falcon.entity.v0.feed.Clusters;
 import org.apache.falcon.entity.v0.feed.ClusterType;
 import org.apache.falcon.entity.v0.feed.Extract;
 import org.apache.falcon.entity.v0.feed.ExtractMethod;
-import org.apache.falcon.entity.v0.feed.Feed;
 import org.apache.falcon.entity.v0.feed.FieldsType;
 import org.apache.falcon.entity.v0.feed.FieldIncludeExclude;
 import org.apache.falcon.entity.v0.feed.Import;
 import org.apache.falcon.entity.v0.feed.MergeType;
+import org.apache.falcon.entity.v0.feed.Export;
+import org.apache.falcon.entity.v0.feed.LoadMethod;
+
+
 import org.apache.falcon.entity.v0.cluster.Cluster;
 import org.apache.falcon.entity.v0.datasource.Datasource;
 import org.apache.falcon.entity.v0.process.Input;
@@ -184,6 +189,36 @@ public class EntityGraphTest extends AbstractTestBase {
         return imp;
     }
 
+    private Feed addFeedExport(String feed, Cluster cluster, Datasource ds) {
+
+        Feed f1 = new Feed();
+        f1.setName(feed);
+        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
+                new org.apache.falcon.entity.v0.feed.Cluster();
+        feedCluster.setName(cluster.getName());
+        feedCluster.setType(ClusterType.SOURCE);
+        Clusters clusters = new Clusters();
+        clusters.getClusters().add(feedCluster);
+        f1.setClusters(clusters);
+
+        Export exp = getAnExport(LoadMethod.UPDATEONLY, ds);
+        f1.getClusters().getClusters().get(0).setExport(exp);
+        return f1;
+    }
+
+    private Export getAnExport(LoadMethod loadMethod, Datasource ds) {
+
+        org.apache.falcon.entity.v0.feed.Datasource target = new org.apache.falcon.entity.v0.feed.Datasource();
+        target.setName(ds.getName());
+        target.setTableName("test-table");
+        Load load = new Load();
+        load.setType(loadMethod);
+        target.setLoad(load);
+        Export exp = new Export();
+        exp.setTarget(target);
+        return exp;
+    }
+
     private void attachInput(Process process, Feed feed) {
         if (process.getInputs() == null) {
             process.setInputs(new Inputs());
@@ -382,6 +417,42 @@ public class EntityGraphTest extends AbstractTestBase {
     }
 
     @Test
+    public void testOnAddExport() throws Exception {
+
+        Datasource ds = new Datasource();
+        ds.setName("test-db");
+        ds.setColo("c1");
+
+        Cluster cluster = new Cluster();
+        cluster.setName("ci1");
+        cluster.setColo("c1");
+
+        Feed f1 = addFeedExport("fe1", cluster, ds);
+
+        store.publish(EntityType.CLUSTER, cluster);
+        store.publish(EntityType.DATASOURCE, ds);
+        store.publish(EntityType.FEED, f1);
+
+        Set<Entity> entities = graph.getDependents(cluster);
+        Assert.assertEquals(entities.size(), 1);
+        Assert.assertTrue(entities.contains(f1));
+
+        entities = graph.getDependents(ds);
+        Assert.assertEquals(entities.size(), 1);
+        Assert.assertTrue(entities.contains(f1));
+
+        entities = graph.getDependents(f1);
+        Assert.assertEquals(entities.size(), 2);
+        Assert.assertTrue(entities.contains(cluster));
+        Assert.assertTrue(entities.contains(ds));
+
+        store.remove(EntityType.FEED, "fe1");
+        store.remove(EntityType.DATASOURCE, "test-db");
+        store.remove(EntityType.CLUSTER, "ci1");
+    }
+
+
+    @Test
     public void testOnRemoveDatasource() throws Exception {
 
         Datasource ds = new Datasource();

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java b/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
index c0ae5fc..62db501 100644
--- a/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
+++ b/common/src/test/java/org/apache/falcon/metadata/MetadataMappingServiceTest.java
@@ -122,8 +122,10 @@ public class MetadataMappingServiceTest {
         configStore = ConfigurationStore.get();
 
         Services.get().register(new WorkflowJobEndNotificationService());
-        StartupProperties.get().setProperty("falcon.graph.storage.directory",
-                "target/graphdb-" + System.currentTimeMillis());
+        StartupProperties.get().setProperty("falcon.graph.storage.backend", "berkeleyje");
+        String graphDBDir = "target/graphdb-" + System.currentTimeMillis();
+        StartupProperties.get().setProperty("falcon.graph.storage.directory", graphDBDir);
+        StartupProperties.get().setProperty("falcon.graph.serialize.path", graphDBDir);
         StartupProperties.get().setProperty("falcon.graph.preserve.history", "true");
         service = new MetadataMappingService();
         service.init();

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java b/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
index 3a6d8c0..2196ad1 100644
--- a/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
+++ b/common/src/test/java/org/apache/falcon/security/DefaultAuthorizationProviderTest.java
@@ -315,7 +315,7 @@ public class DefaultAuthorizationProviderTest {
                 "admin", realUser, new String[]{"admin", });
 
         DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "list", null, "primary-cluster", proxyUgi);
+        provider.authorizeResource("instance", "list", null, "sample-process", proxyUgi);
         Assert.fail("Bad entity type");
     }
 
@@ -328,7 +328,7 @@ public class DefaultAuthorizationProviderTest {
                 "admin", realUser, new String[]{"admin", });
 
         DefaultAuthorizationProvider provider = new DefaultAuthorizationProvider();
-        provider.authorizeResource("entities", "list", "clusterz", "primary-cluster", proxyUgi);
+        provider.authorizeResource("instance", "list", "processz", "sample-process", proxyUgi);
         Assert.fail("Bad entity type");
     }
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/distro/pom.xml
----------------------------------------------------------------------
diff --git a/distro/pom.xml b/distro/pom.xml
index 4351400..858e692 100644
--- a/distro/pom.xml
+++ b/distro/pom.xml
@@ -23,7 +23,7 @@ limitations under the License.
   <parent>
     <groupId>org.apache.falcon</groupId>
     <artifactId>falcon-main</artifactId>
-    <version>0.10-SNAPSHOT</version>
+    <version>0.10</version>
   </parent>
   <artifactId>falcon-distro</artifactId>
   <description>Apache Falcon Distro</description>
@@ -52,6 +52,7 @@ limitations under the License.
                 <descriptor>../src/main/assemblies/src-package.xml</descriptor>
               </descriptors>
               <finalName>apache-falcon-distributed-${project.version}</finalName>
+              <tarLongFileMode>posix</tarLongFileMode>
             </configuration>
             <executions>
               <execution>
@@ -126,6 +127,7 @@ limitations under the License.
             <descriptor>../src/main/assemblies/src-package.xml</descriptor>
           </descriptors>
           <finalName>apache-falcon-${project.version}</finalName>
+          <tarLongFileMode>posix</tarLongFileMode>
         </configuration>
         <executions>
           <execution>

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/license/animate-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/animate-LICENSE.txt b/docs/license/animate-LICENSE.txt
new file mode 100644
index 0000000..76ba6f5
--- /dev/null
+++ b/docs/license/animate-LICENSE.txt
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Daniel Eden
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/license/cabin-font-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/cabin-font-LICENSE.txt b/docs/license/cabin-font-LICENSE.txt
new file mode 100644
index 0000000..03b1c52
--- /dev/null
+++ b/docs/license/cabin-font-LICENSE.txt
@@ -0,0 +1,95 @@
+Copyright (c) 2011, Pablo Impallari (www.impallari.com|impallari@gmail.com),
+Copyright (c) 2011, Igino Marini. (www.ikern.com|mail@iginomarini.com),
+with Reserved Font Name Cabin.
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+http://scripts.sil.org/OFL
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded, 
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/license/ngMask-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/ngMask-LICENSE.txt b/docs/license/ngMask-LICENSE.txt
new file mode 100644
index 0000000..8fb8ea2
--- /dev/null
+++ b/docs/license/ngMask-LICENSE.txt
@@ -0,0 +1,13 @@
+Copyright (c) 2014, Carlos André Oliveira <ca...@gmail.com>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/license/ngTagsInput-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/ngTagsInput-LICENSE.txt b/docs/license/ngTagsInput-LICENSE.txt
new file mode 100644
index 0000000..2e766e3
--- /dev/null
+++ b/docs/license/ngTagsInput-LICENSE.txt
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Michael Benford
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/license/normalize-LICENSE.txt
----------------------------------------------------------------------
diff --git a/docs/license/normalize-LICENSE.txt b/docs/license/normalize-LICENSE.txt
new file mode 100644
index 0000000..a9dc817
--- /dev/null
+++ b/docs/license/normalize-LICENSE.txt
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright © Nicolas Gallagher and Jonathan Neal
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/pom.xml
----------------------------------------------------------------------
diff --git a/docs/pom.xml b/docs/pom.xml
index 766f408..e3556e8 100644
--- a/docs/pom.xml
+++ b/docs/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <groupId>org.apache.falcon</groupId>
         <artifactId>falcon-main</artifactId>
-        <version>0.10-SNAPSHOT</version>
+        <version>0.10</version>
     </parent>
     <artifactId>falcon-docs</artifactId>
     <description>Apache Falcon Documentation</description>

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/Configuration.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Configuration.twiki b/docs/src/site/twiki/Configuration.twiki
index bfca3d8..98acb83 100644
--- a/docs/src/site/twiki/Configuration.twiki
+++ b/docs/src/site/twiki/Configuration.twiki
@@ -103,9 +103,6 @@ Oozie workflow completes. Falcon listens to Oozie notification via JMS. You need
 explained below. Falcon post processing feature continues to only send user notifications so enabling Oozie
 JMS notification is important.
 
-*NOTE : If Oozie JMS notification is not enabled, the Falcon features such as failure retry, late data handling and metadata
-service will be disabled for all entities on the server.*
-
 ---+++Enable Oozie JMS notification
 
    * Please add/change the following properties in oozie-site.xml in the oozie installation dir.
@@ -325,10 +322,16 @@ to <verbatim>$FALCON_HOME/conf/startup.properties</verbatim> before starting the
 For details on the same, refer to [[FalconNativeScheduler][Falcon Native Scheduler]]
 
 ---+++Titan GraphDB backend
-You can either choose to use 5.0.73 version of berkeleydb (the default for Falcon for the last few releases) or 1.1.x or later version HBase as the backend database. Falcon in its release distributions will have the titan storage plugins for both BerkeleyDB and HBase.
+A GraphDB backend needs to be configured for the Falcon server to start properly.
+You can choose either BerkeleyDB version 5.0.73 (the Falcon default for the last few releases) or HBase version 1.1.x or later as the backend database.
+Falcon release distributions include the Titan storage plugins for both BerkeleyDB and HBase.
 
 ----++++Using BerkeleyDB backend
-Falcon distributions may not package berkeley db artifacts (je-5.0.73.jar) based on build profiles.  If Berkeley DB is not packaged, you can download the Berkeley DB jar file from the URL: <verbatim>http://download.oracle.com/otn/berkeley-db/je-5.0.73.zip</verbatim>.   The following properties describe an example berkeley db graph storage backend that can be specified in the configuration file <verbatim>$FALCON_HOME/conf/startup.properties</verbatim>.
+Falcon distributions may not package berkeley db artifacts (je-5.0.73.jar) based on build profiles.
+If Berkeley DB is not packaged, you can download the Berkeley DB jar file from the URL:
+<verbatim>http://download.oracle.com/otn/berkeley-db/je-5.0.73.zip</verbatim>.
+The following properties describe an example berkeley db graph storage backend that can be specified in the configuration file
+<verbatim>$FALCON_HOME/conf/startup.properties</verbatim>.
 
 <verbatim>
 # Graph Storage
@@ -337,13 +340,14 @@ Falcon distributions may not package berkeley db artifacts (je-5.0.73.jar) based
 *.falcon.graph.serialize.path=${user.dir}/target/graphdb
 </verbatim>
 
-----++++Using HBase backend
+---++++Using HBase backend
 
-To use HBase as the backend it is recommended that a HBase cluster be provisioned with distributed mode confiuguratoin, primarily because of the support of kerberos enabled clusters and HA considerations.  Based on build profile, a standalone hbase version can be packaged with the Falcon binary distribution.   Along with this, a template for <verbatim>hbase-site.xml</verbatim> is provided, which can be used to start the standalone mode HBase enviornment for development/testing purposes.
+To use HBase as the backend, it is recommended that an HBase cluster be provisioned with distributed mode configuration, primarily because of the support for kerberos enabled clusters and HA considerations.  Based on build profile, a standalone hbase version can be packaged with the Falcon binary distribution.   Along with this, a template for <verbatim>hbase-site.xml</verbatim> is provided, which can be used to start the standalone mode HBase environment for development/testing purposes.
 
-Basic configuration
+---++++Basic configuration
 
 <verbatim>
+##### Falcon startup.properties
 *.falcon.graph.storage.backend=hbase
 #For standalone mode , specify localhost
 #for distributed mode, specify zookeeper quorum here - For more information refer http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2
@@ -362,7 +366,45 @@ We recommend that in the startup config the tablename for titan storage be named
 *.falcon.graph.storage.hbase.table=falcon_titan
 </verbatim>
 
-Permissions
+---++++Starting standalone HBase for testing
+
+HBase can be started in standalone mode for testing as a backend for Titan. The following steps outline the config changes required:
+<verbatim>
+1. Build Falcon as below to package hbase binaries
+   $ export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m" && mvn clean assembly:assembly -Ppackage-standalone-hbase
+2. Configure HBase
+   a. When the Falcon tar file is expanded, the HBase binaries are under ${FALCON_HOME}/hbase
+   b. Copy ${FALCON_HOME}/conf/hbase-site.xml.template to ${FALCON_HOME}/hbase/conf/hbase-site.xml
+   c. Set the {hbase_home} property to point to a local dir
+   d. Standalone HBase starts zookeeper on the default port (2181). This port can be changed by adding the following to hbase-site.xml
+       <property>
+            <name>hbase.zookeeper.property.clientPort</name>
+            <value>2223</value>
+       </property>
+
+       <property>
+            <name>hbase.zookeeper.quorum</name>
+            <value>localhost</value>
+       </property>
+   e. Set JAVA_HOME to point to Java 1.7 or above
+   f. Start HBase: ${FALCON_HOME}/hbase/bin/start-hbase.sh
+3. Configure Falcon
+   a. In ${FALCON_HOME}/conf/startup.properties, uncomment the following to enable HBase as the backend
+      *.falcon.graph.storage.backend=hbase
+      ### specify the zookeeper host and port name with which standalone hbase is started (see step 2)
+      ### by default, it will be localhost and port 2181
+      *.falcon.graph.storage.hostname=<zookeeper-host-name>:<zookeeper-host-port>
+      *.falcon.graph.serialize.path=${user.dir}/target/graphdb
+      *.falcon.graph.storage.hbase.table=falcon_titan
+      *.falcon.graph.storage.transactions=false
+4. Add HBase jars to Falcon classpath in ${FALCON_HOME}/conf/falcon-env.sh as:
+      FALCON_EXTRA_CLASS_PATH=`${FALCON_HOME}/hbase/bin/hbase classpath`
+5. Set the following in ${FALCON_HOME}/conf/startup.properties to disable SSL if needed
+      *.falcon.enableTLS=false
+6. Start Falcon
+</verbatim>
+
+---++++Permissions
 
 When Falcon is configured with HBase as the storage backend, Titan needs to have sufficient authorizations to create and access an HBase table.  In a secure cluster it may be necessary to grant permissions to the <verbatim>falcon</verbatim> user for the <verbatim>falcon_titan</verbatim> table (or whatever table name was specified for the property <verbatim>*.falcon.graph.storage.hbase.table</verbatim>).
 
@@ -376,7 +418,7 @@ Without Ranger, HBase shell can be used to set the permissions.
    echo "grant 'falcon', 'RWXCA', 'falcon_titan'" | hbase shell
 </verbatim>
 
-Advanced configuration
+---++++Advanced configuration
 
 HBase storage backend support in Titan has a few other configurations and they can be set in <verbatim>$FALCON_HOME/conf/startup.properties</verbatim>, by prefixing the Titan property with <verbatim>*.falcon.graph</verbatim> prefix.
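+
+For example, the following is an illustrative fragment (these particular option names come from Titan's HBase storage configuration; verify them against the Titan version shipped with your Falcon release):
+<verbatim>
+# Any Titan storage option can be passed through by adding the *.falcon.graph prefix
+*.falcon.graph.storage.hbase.region-count=4
+*.falcon.graph.storage.lock.wait-time=300
+</verbatim>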
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/DataReplicationAzure.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/DataReplicationAzure.twiki b/docs/src/site/twiki/DataReplicationAzure.twiki
new file mode 100644
index 0000000..24e543b
--- /dev/null
+++ b/docs/src/site/twiki/DataReplicationAzure.twiki
@@ -0,0 +1,61 @@
+---+ Data Replication between On-premise Hadoop Clusters and Azure Cloud
+
+---++ Overview
+Falcon provides an easy way to replicate data between on-premise Hadoop clusters and Azure cloud.
+With this feature, users would be able to build a hybrid data pipeline,
+e.g. processing sensitive data on-premises for privacy and compliance reasons
+while leveraging the cloud for elastic scale and online services (e.g. Azure Machine Learning) with non-sensitive data.
+
+---++ Use Case
+1. Copy data from on-premise Hadoop clusters to Azure cloud
+2. Copy data from Azure cloud to on-premise Hadoop clusters
+3. Copy data within Azure cloud (i.e. from one Azure location to another).
+
+---++ Usage
+---+++ Set Up Azure Blob Credentials
+To move data to/from Azure blobs, we need to add Azure blob credentials in HDFS.
+This can be done by adding the credential property through Ambari HDFS configs, and HDFS needs to be restarted after adding the credential.
+You can also add the credential property to core-site.xml directly, but make sure you restart HDFS from command line instead of Ambari.
+Otherwise, Ambari will take the previous HDFS configuration without your Azure blob credentials.
+<verbatim>
+<property>
+      <name>fs.azure.account.key.{AZURE_BLOB_ACCOUNT_NAME}.blob.core.windows.net</name>
+      <value>{AZURE_BLOB_ACCOUNT_KEY}</value>
+</property>
+</verbatim>
+
+To verify you set up Azure credential properly, you can check if you are able to access Azure blob through HDFS, e.g.
+<verbatim>
+hadoop fs -ls wasb://{AZURE_BLOB_CONTAINER}@{AZURE_BLOB_ACCOUNT_NAME}.blob.core.windows.net/
+</verbatim>
+
+---+++ Replication Feed
+[[EntitySpecification][Falcon replication feed]] can be used for data replication to/from Azure cloud.
+You can specify WASB (i.e. Windows Azure Storage Blob) url in source or target locations.
+See below for an example of data replication from Hadoop cluster to Azure blob.
+Note that the clusters for the source and the target need to be different.
+Analogously, if you want to copy data from Azure blob, you can add Azure blob location to the source.
+<verbatim>
+<?xml version="1.0" encoding="UTF-8"?>
+<feed name="AzureReplication" xmlns="uri:falcon:feed:0.1">
+    <frequency>months(1)</frequency>
+    <clusters>
+        <cluster name="SampleCluster1" type="source">
+            <validity start="2010-06-01T00:00Z" end="2010-06-02T00:00Z"/>
+            <retention limit="days(90)" action="delete"/>
+        </cluster>
+        <cluster name="SampleCluster2" type="target">
+            <validity start="2010-06-01T00:00Z" end="2010-06-02T00:00Z"/>
+            <retention limit="days(90)" action="delete"/>
+            <locations>
+                <location type="data" path="wasb://replication-test@mystorage.blob.core.windows.net/replicated-${YEAR}-${MONTH}"/>
+            </locations>
+        </cluster>
+    </clusters>
+    <locations>
+        <location type="data" path="/apps/falcon/demo/data-${YEAR}-${MONTH}" />
+    </locations>
+    <ACL owner="ambari-qa" group="users" permission="0755"/>
+    <schema location="hcat" provider="hcat"/>
+</feed>
+</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/Embedded-mode.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Embedded-mode.twiki b/docs/src/site/twiki/Embedded-mode.twiki
index d5c37a1..47acab4 100644
--- a/docs/src/site/twiki/Embedded-mode.twiki
+++ b/docs/src/site/twiki/Embedded-mode.twiki
@@ -178,6 +178,7 @@ Submit and schedule the process:
 <verbatim>
 $bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/oozie-mr-process.xml
 $bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/pig-process.xml
+$bin/falcon entity -submitAndSchedule -type process -file examples/entity/spark/spark-process.xml
 </verbatim>
 Generate input data:
 <verbatim>
@@ -189,7 +190,7 @@ $bin/falcon instance -status -type process -name oozie-mr-process -start 2013-11
 </verbatim>
 
 HCat based example entities are in examples/entity/hcat.
-
+Spark based example entities are in examples/entity/spark.
 
 ---+++Stopping Falcon Server
 <verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/EntitySpecification.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/EntitySpecification.twiki b/docs/src/site/twiki/EntitySpecification.twiki
index b27e341..9f9e210 100644
--- a/docs/src/site/twiki/EntitySpecification.twiki
+++ b/docs/src/site/twiki/EntitySpecification.twiki
@@ -52,6 +52,11 @@ A registry interface specifies the interface for metadata catalog, such as Hive
 Falcon uses this interface to register/de-register partitions for a given database and table. Also,
 uses this information to schedule data availability events based on partitions in the workflow engine.
 Although Hive metastore supports both RPC and HTTP, Falcon comes with an implementation for RPC over thrift.
+For Hive HA mode, make sure the URIs are separated by commas and that the "thrift://" protocol prefix is added only once, at the beginning.
+See below for an example of Hive HA mode:
+<verbatim>
+<interface type="registry" endpoint="thrift://c6402.ambari.apache.org:9083,c6403.ambari.apache.org:9083" version="0.11.0" />
+</verbatim>
 
 <verbatim>
 <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true" version="5.4.6" />
@@ -828,13 +833,13 @@ be in lib folder inside the workflow path.
 The properties defined in the cluster and cluster properties(nameNode and jobTracker) will also
 be available for the workflow.
 
-There are 3 engines supported today.
+There are 4 engines supported today.
 
 ---++++ Oozie
 
 As part of oozie workflow engine support, users can embed a oozie workflow.
-Refer to oozie [[http://oozie.apache.org/docs/4.0.1/DG_Overview.html][workflow overview]] and
-[[http://oozie.apache.org/docs/4.0.1/WorkflowFunctionalSpec.html][workflow specification]] for details.
+Refer to oozie [[http://oozie.apache.org/docs/4.2.0/DG_Overview.html][workflow overview]] and
+[[http://oozie.apache.org/docs/4.2.0/WorkflowFunctionalSpec.html][workflow specification]] for details.
 
 Syntax:
 <verbatim>
@@ -897,6 +902,54 @@ This defines the workflow engine to be hive and the hive script is defined at
 Feeds with Hive table storage will send one more parameter apart from the general ones:
 <verbatim>$input_filter</verbatim>
 
+---++++ Spark
+Falcon also supports the Spark engine as part of Spark integration, which enables users to run a Java/Python Spark application as a process.
+When the "spark" workflow engine is specified, Spark-related parameters must be provided through <spark-attributes>.
+Examples:
+<verbatim>
+<process name="spark-process">
+...
+    <workflow engine="spark" path="/resources/action">
+    <spark-attributes>
+          <master>local</master>
+          <name>Spark WordCount</name>
+          <class>org.examples.WordCount</class>
+          <jar>/resources/action/lib/spark-application.jar</jar>
+          <spark-opts>--num-executors 1 --driver-memory 512m</spark-opts>
+    </spark-attributes>
+...
+</process>
+</verbatim>
+
+This defines the workflow engine to be Spark; the Java/Python Spark application to be executed must be specified with the "jar" option.
+If a Spark interface is already defined in the cluster entity, the Spark master can be overridden through the process entity to either "yarn-client" or "yarn-cluster".
+If input and output feed entities are defined in the process entity, the input and output data paths for the Spark application are set as arguments when the Spark workflow is generated.
+In that set of arguments, the first argument always corresponds to the input feed, the second argument always corresponds to the output feed, and the user-provided arguments follow. A minimal sketch of an application reading these arguments is shown below.
+
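+A minimal sketch (hypothetical application code, not shipped with Falcon) of how a Spark application could consume the arguments Falcon passes in:
+<verbatim>
+from __future__ import print_function
+import sys
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+    # When Falcon generates the Spark workflow: argv[1] is the input feed path,
+    # argv[2] is the output feed path, and any user-provided arguments follow.
+    input_path = sys.argv[1]
+    output_path = sys.argv[2]
+    user_args = sys.argv[3:]
+
+    sc = SparkContext(appName="falcon-spark-process")
+    # Trivial pass-through that simply copies the input data to the output path
+    sc.textFile(input_path).saveAsTextFile(output_path)
+    sc.stop()
+</verbatim>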
+
+For running a Spark SQL process entity that reads and writes data stored in Hive, the datanucleus jars under the $HIVE_HOME/lib directory and hive-site.xml
+under the $SPARK_HOME/conf/ directory need to be available on the driver and on all executors launched by the YARN cluster.
+A convenient way to do this is to add them through the --jars and --files options in the spark-opts attribute.
+Example:
+<verbatim>
+<process name="spark-process">
+...
+    <workflow engine="spark" path="/resources/action">
+    <spark-attributes>
+        <master>local</master>
+        <name>Spark SQL</name>
+        <class>org.examples.SparkSQLProcessTable</class>
+        <jar>/resources/action/lib/spark-application.jar</jar>
+        <spark-opts>--num-executors 1 --driver-memory 512m --jars /usr/local/hive/lib/datanucleus-rdbms.jar,/usr/local/hive/lib/datanucleus-core.jar,/usr/local/hive/lib/datanucleus-api-jdo.jar --files /usr/local/spark/conf/hive-site.xml</spark-opts>
+    </spark-attributes>
+...
+</process>
+</verbatim>
+
+If input and output feed entities are defined in the process entity, the input and output for the Spark SQL application are set as arguments when the Spark workflow is generated.
+If the input feed is of table type, the input table partition, table name and database name are set as input arguments. If the output feed is of table type, the output table partition, table name and database name are set as output arguments.
+Once the input and output arguments are set, the user-provided arguments follow; see the sketch after this paragraph.
+
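+A rough sketch (hypothetical application code; the argument order simply follows the description above) of unpacking these table arguments:
+<verbatim>
+import sys
+from pyspark import SparkContext
+from pyspark.sql import HiveContext
+
+if __name__ == "__main__":
+    # Input feed of table type: partition, table name, database name,
+    # followed by the same three values for the output feed.
+    in_partition, in_table, in_db = sys.argv[1:4]
+    out_partition, out_table, out_db = sys.argv[4:7]
+
+    sc = SparkContext(appName="falcon-spark-sql-process")
+    hive_ctx = HiveContext(sc)
+    df = hive_ctx.table("{0}.{1}".format(in_db, in_table))
+    # ... filter to in_partition, transform, and write into out_db.out_table ...
+    sc.stop()
+</verbatim>
+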
 ---+++ Retry
 Retry policy defines how the workflow failures should be handled. Three retry policies are defined: periodic, exp-backoff(exponential backoff) and final. Depending on the delay and number of attempts, the workflow is re-tried after specific intervals. If user sets the onTimeout attribute to "true", retries will happen for TIMED_OUT instances.
 Syntax:

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/Extensions.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Extensions.twiki b/docs/src/site/twiki/Extensions.twiki
index cf88c87..a3fed4e 100644
--- a/docs/src/site/twiki/Extensions.twiki
+++ b/docs/src/site/twiki/Extensions.twiki
@@ -53,6 +53,7 @@ config name: *.application.services
 config value: org.apache.falcon.extensions.ExtensionService
 </verbatim>
 
+ExtensionService should be added before ConfigurationStore in the *.application.services list in startup.properties, as shown in the sketch below.
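+A minimal illustrative startup.properties fragment showing the required relative ordering (any other services already in your list are omitted here for brevity and stay as they are):
+<verbatim>
+*.application.services=org.apache.falcon.extensions.ExtensionService,org.apache.falcon.entity.store.ConfigurationStore
+</verbatim>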
 For manual installation user is expected to update "extension.store.uri" property defined in startup properties with
 HDFS path where the extension artifacts will be copied to.
 Extension artifacts in addons/extensions are packaged in falcon. For manual installation once the Falcon Server is setup user is expected to copy the extension artifacts under {falcon-server-dir}/extensions to HDFS at "extension.store.uri" path defined in startup properties and then restart Falcon.
@@ -60,5 +61,5 @@ Extension artifacts in addons/extensions are packaged in falcon. For manual inst
 ---++ Migration
 Recipes framework and HDFS mirroring capability was added in Apache Falcon 0.6.0 release and it was client side logic. With 0.10 release its moved to server side and renamed as server side extensions. Client side recipes only had CLI support and expected certain pre steps to get it working. This is no longer required in 0.10 release as new CLI and REST API support has been provided.
 
-If user is migrating to 0.10 release and above then old Recipe setup and CLI's won't work. For manual installation user is expected to copy Extension artifacts to HDFS. Please refer "Packaging and installation" section above for more details.
+Migrating to the 0.10 release and above is not backward compatible for Recipes: the old Recipe setup and CLIs won't work. For manual installation, the user is expected to copy the extension artifacts to HDFS. Please refer to the "Packaging and installation" section above for more details.
 Please Refer to [[falconcli/FalconCLI][Falcon CLI]] and [[restapi/ResourceList][REST API]] for more details on usage of CLI and REST API's for extension jobs and instances management.

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/FalconDocumentation.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/FalconDocumentation.twiki b/docs/src/site/twiki/FalconDocumentation.twiki
index 4848746..fe1c0de 100644
--- a/docs/src/site/twiki/FalconDocumentation.twiki
+++ b/docs/src/site/twiki/FalconDocumentation.twiki
@@ -447,9 +447,11 @@ cluster, (no dirty reads)
 
 ---+++ Archival as Replication
 
-Falcon allows users to archive data from on-premice to cloud, either Azure WASB or S3.
+Falcon allows users to archive data from on-premise to cloud, either Azure WASB or S3.
 It uses the underlying replication for archiving data from source to target. The archival URI is
 specified as the overridden location for the target cluster.
+Note that for data replication between on-premise and Azure cloud, Azure credentials need to be added to core-site.xml.
+Please refer to [[DataReplicationAzure][AzureDataReplication]] for details and examples.
 
 *Example:*
 <verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/GraphiteMetricCollection.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/GraphiteMetricCollection.twiki b/docs/src/site/twiki/GraphiteMetricCollection.twiki
new file mode 100644
index 0000000..c76e68d
--- /dev/null
+++ b/docs/src/site/twiki/GraphiteMetricCollection.twiki
@@ -0,0 +1,22 @@
+---++Graphite Metric Collection
+
+Graphite Metric Collection currently allows collecting the following metrics at the process level:
+
+1. Processing time the process spent in the running state in seconds (workflow_end_time - workflow_start_time)
+2. Wait time that the process spent in the waiting/ready state. (workflow_start_time - workflow_nominal_time)
+3. Number of instances that are failed for a process.
+
+To send data to Graphite we need to initialize the MetricNotificationService in startup.properties:
+
+*.application.services= org.apache.falcon.metrics.MetricNotificationService,
+
+
+Add the following properties for the graphiteNotificationPlugin:
+
+Graphite properties
+   * *.falcon.graphite.hostname=localhost
+   * *.falcon.graphite.port=2003
+   * *.falcon.graphite.frequency=1
+   * *.falcon.graphite.prefix=falcon
+
+The falcon.graphite.frequency is specified in seconds, and all time values sent to Graphite are in seconds; see the consolidated example below.
\ No newline at end of file
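+
+Putting these together, an illustrative startup.properties fragment (MetricNotificationService is appended to whatever services are already configured; the values shown are the defaults listed above):
+<verbatim>
+*.application.services=org.apache.falcon.metrics.MetricNotificationService
+*.falcon.graphite.hostname=localhost
+*.falcon.graphite.port=2003
+*.falcon.graphite.frequency=1
+*.falcon.graphite.prefix=falcon
+</verbatim>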

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/InstallationSteps.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/InstallationSteps.twiki b/docs/src/site/twiki/InstallationSteps.twiki
index 93b1eab..297d88e 100644
--- a/docs/src/site/twiki/InstallationSteps.twiki
+++ b/docs/src/site/twiki/InstallationSteps.twiki
@@ -27,16 +27,15 @@ $ mvn clean install
 </verbatim>
 It builds and installs the package into the local repository, for use as a dependency in other projects locally.
 
-[optionally -Dhadoop.version=<<hadoop.version>> can be appended to build for a specific version of Hadoop]
-
-*NOTE:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards
-[optionally -Doozie.version=<<oozie version>> can be appended to build with a specific version of Oozie. Oozie versions
->= 4 are supported]
-NOTE: Falcon builds with JDK 1.7/1.8 using -noverify option
-      To compile Falcon with Hive Replication, optionally "-P hadoop-2,hivedr" can be appended. For this Hive >= 1.2.0
-      and Oozie >= 4.2.0 should be available.
+[optionally -Dhadoop.version=<<hadoop.version>> can be appended to build for a specific version of Hadoop]
 
+*Note 1:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards.
+          Falcon builds with JDK 1.7 using the -noverify option.
 
+*Note 2:* To compile Falcon with addon extensions, append additional profiles to the build command using the syntax -P<<profile1,profile2>> (see the example below).
+          For the Hive Mirroring extension, use profile "hivedr". Hive >= 1.2.0 and Oozie >= 4.2.0 are required.
+          For the HDFS Snapshot mirroring extension, use profile "hdfs-snapshot-mirroring". Hadoop >= 2.7.0 is required.
+          For ADF integration, use profile "adf".
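+
+For example (illustrative; substitute the profiles you need), building with the Hive Mirroring extension enabled:
+<verbatim>
+$ mvn clean install -Phivedr
+</verbatim>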
 
 ---+++Step 3 - Package and Deploy Falcon
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/MigrationInstructions.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/MigrationInstructions.twiki b/docs/src/site/twiki/MigrationInstructions.twiki
index 7c0e027..a11dbc4 100644
--- a/docs/src/site/twiki/MigrationInstructions.twiki
+++ b/docs/src/site/twiki/MigrationInstructions.twiki
@@ -1,15 +1,32 @@
 ---+ Migration Instructions
 
----++ Migrate from 0.5-incubating to 0.6-incubating
+---++ Migrate from 0.9 to 0.10
 
-This is a placeholder wiki for migration instructions from falcon 0.5-incubating to 0.6-incubating.
+FALCON-1333 (Instance Search feature) requires Falcon to use titan-berkeleyje version 0.5.4 to support indexing.
+Up until version 0.9, Falcon used titan-berkeleyje-jre6 version 0.4.2. A GraphDB created by version 0.4.2 cannot be
+read by version 0.5.4. The solution is to migrate the GraphDB to be compatible with the Falcon 0.10 release. Please make
+sure that no Falcon server is running while performing the migration.
 
----+++ Update Entities
+---+++ 1. Install Falcon 0.10
+Install Falcon 0.10 by following the [[InstallationSteps][Installation Steps]]. Do not start the falcon server yet.
+The tool to migrate graphDB is packaged with 0.10 Falcon server in falcon-common-0.10.jar.
 
----+++ Change cluster dir permissions
+---+++ 2. Export GraphDB to JSON file using Falcon 0.9
+Please run the following command to generate the JSON file.
 
----+++ Enable/Disable TLS
+<verbatim>
+ $FALCON_HOME/bin/graphdbutil.sh export <<java_home>> <<hadoop_home>> <<falcon_0.9_home>> <<path_to_falcon-common-0.10.jar>> /jsonFile/dir/
+</verbatim>
 
----+++ Authorization
+This command will create /jsonFile/dir/instanceMetadata.json
 
+---+++ 3. Import GraphDB from JSON file using Falcon 0.10
+Please run the following command to import the graphDB from the JSON file. The location of the graphDB will be based on the property
+"*.falcon.graph.storage.directory" set in the startup.properties file.
+
+<verbatim>
+  $FALCON_HOME/bin/graphdbutil.sh import <<java_home>> <<hadoop_home>> <<falcon_0.10_home>> <<path_to_falcon-common-0.10.jar>> /jsonFile/dir/
+</verbatim>
+
+This command will import from /jsonFile/dir/instanceMetadata.json. Once the import completes, start the Falcon 0.10 server.
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/Operability.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Operability.twiki b/docs/src/site/twiki/Operability.twiki
index 616af36..2bccb51 100644
--- a/docs/src/site/twiki/Operability.twiki
+++ b/docs/src/site/twiki/Operability.twiki
@@ -223,3 +223,8 @@ under ${user.dir}/logs/ directory.
 
 Users may also extend the Falcon Audit plugin to send audits to systems like Apache Argus, etc. by
 extending org.apache.falcon.plugin.AuditingPlugin interface.
+
+
+---++ Metrics Collection In Graphite
+
+Falcon supports sending metrics to Graphite. More details can be found at [[GraphiteMetricCollection][Graphite Metric Collection]]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/restapi/EntityList.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/EntityList.twiki b/docs/src/site/twiki/restapi/EntityList.twiki
index 2c2a734..a439dc7 100644
--- a/docs/src/site/twiki/restapi/EntityList.twiki
+++ b/docs/src/site/twiki/restapi/EntityList.twiki
@@ -1,4 +1,4 @@
----++  GET /api/entities/list/:entity-type?fields=:fields
+---++  GET /api/entities/list/{:entity-type}
    * <a href="#Description">Description</a>
    * <a href="#Parameters">Parameters</a>
    * <a href="#Results">Results</a>
@@ -8,7 +8,7 @@
 Get list of the entities.
 
 ---++ Parameters
-   * :entity-type Comma-separated entity types. Can be empty. Valid entity types are cluster, feed or process.
+   * :entity-type <optional param> Comma-separated entity types. Valid entity types are cluster, feed or process.
    * fields <optional param> Fields of entity that the user wants to view, separated by commas.
       * Valid options are STATUS, TAGS, PIPELINES, CLUSTERS.
    * nameseq <optional param> Subsequence of entity name. Not case sensitive.
@@ -38,20 +38,28 @@ Total number of results and a list of entities.
 ---++ Examples
 ---+++ Rest Call
 <verbatim>
-GET http://localhost:15000/api/entities/list/feed
+GET http://localhost:15000/api/entities/list
 </verbatim>
 ---+++ Result
 <verbatim>
 {
-    "totalResults":"2\u201d,
+    "totalResults":"4\u201d,
     "entity": [
         {
-            "name": "SampleOutput",
-            "type": "feed"
+            "name"  : "SampleCluster1",
+            "type"  : "cluster"
+        },
+        {
+            "name"  : "SampleOutput",
+            "type"  : "feed"
         },
         {
-            "name": "SampleInput",
-            "type": "feed"
+            "name"  : "SampleInput",
+            "type"  : "feed"
+        },
+        {
+            "name"  : "SampleProcess1",
+            "type"  : "process"
         }
     ]
 }
@@ -59,28 +67,20 @@ GET http://localhost:15000/api/entities/list/feed
 
 ---+++ Rest Call
 <verbatim>
-GET http://localhost:15000/api/entities/list
+GET http://localhost:15000/api/entities/list/feed
 </verbatim>
 ---+++ Result
 <verbatim>
 {
-    "totalResults":"4\u201d,
+    "totalResults":"2\u201d,
     "entity": [
         {
-            "name"  : "SampleCluster1",
-            "type"  : "cluster"
-        }
-        {
-            "name"  : "SampleOutput",
-            "type"  : "feed"
+            "name": "SampleOutput",
+            "type": "feed"
         },
         {
-            "name"  : "SampleInput",
-            "type"  : "feed"
-        }
-        {
-            "name"  : "SampleProcess1",
-            "type"  : "process"
+            "name": "SampleInput",
+            "type": "feed"
         }
     ]
 }

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/docs/src/site/twiki/restapi/ResourceList.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/restapi/ResourceList.twiki b/docs/src/site/twiki/restapi/ResourceList.twiki
index aadd14f..5a4c253 100644
--- a/docs/src/site/twiki/restapi/ResourceList.twiki
+++ b/docs/src/site/twiki/restapi/ResourceList.twiki
@@ -54,7 +54,7 @@ The current version of the rest api's documentation is also hosted on the Falcon
 | DELETE      | [[EntityDelete][api/entities/delete/:entity-type/:entity-name]]             | Delete the entity                  |
 | GET         | [[EntityStatus][api/entities/status/:entity-type/:entity-name]]             | Get the status of the entity       |
 | GET         | [[EntityDefinition][api/entities/definition/:entity-type/:entity-name]]     | Get the definition of the entity   |
-| GET         | [[EntityList][api/entities/list/:entity-type]]                              | Get the list of entities           |
+| GET         | [[EntityList][api/entities/list/{:entity-type}]]                            | Get the list of entities           |
 | GET         | [[EntitySummary][api/entities/summary/:entity-type/:cluster]]               | Get instance summary of all entities |
 | GET         | [[EntityDependencies][api/entities/dependencies/:entity-type/:entity-name]] | Get the dependencies of the entity |
 | GET         | [[FeedSLA][api/entities/sla-alert/:entity-type]]                            | Get pending feed instances which missed sla |

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/examples/app/spark/wordcount.py
----------------------------------------------------------------------
diff --git a/examples/app/spark/wordcount.py b/examples/app/spark/wordcount.py
new file mode 100644
index 0000000..b6ba426
--- /dev/null
+++ b/examples/app/spark/wordcount.py
@@ -0,0 +1,52 @@
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+from __future__ import print_function
+
+import sys
+from operator import add
+
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("Usage: wordcount <file>", file=sys.stderr)
+        exit(-1)
+    sc = SparkContext(appName="Python WordCount")
+    
+    # Read input and output path
+    inputPath = sys.argv[1]
+    print ('Path of input file ->' + inputPath)
+    outputPath = sys.argv[2]
+    print ('Path of output file ->' + outputPath)
+    
+    distFile = sc.textFile(inputPath)
+    
+    def flatMap(line):
+        return line.split(",")
+    
+    def map(word):
+        return (word,1)
+    
+    def reduce(a,b):
+        return a+b
+    
+    
+    counts = distFile.flatMap(flatMap).map(map).reduceByKey(reduce)
+    
+    counts.saveAsTextFile(outputPath)
+    sc.stop()

http://git-wip-us.apache.org/repos/asf/falcon/blob/01a303e3/examples/entity/spark/pyspark-process.xml
----------------------------------------------------------------------
diff --git a/examples/entity/spark/pyspark-process.xml b/examples/entity/spark/pyspark-process.xml
new file mode 100644
index 0000000..de4b5cd
--- /dev/null
+++ b/examples/entity/spark/pyspark-process.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="pyspark-process" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <cluster name="local">
+            <validity start="2013-11-15T00:05Z" end="2013-11-15T01:05Z"/>
+        </cluster>
+    </clusters>
+
+    <parallel>1</parallel>
+    <order>LIFO</order>
+    <frequency>minutes(5)</frequency>
+    <timezone>UTC</timezone>
+
+    <inputs>
+        <!-- In the workflow, the input paths will be available in a variable 'inpaths' -->
+        <input name="inpaths" feed="in" start="now(0,-5)" end="now(0,-1)"/>
+    </inputs>
+
+    <outputs>
+        <!-- In the workflow, the output path will be available in a variable 'outpath' -->
+        <output name="outpath" feed="out" instance="now(0,0)"/>
+    </outputs>
+
+    <workflow engine="spark" path="/app/spark"/>
+    <spark-attributes>
+        <master>local</master>
+        <name>Python Spark Wordcount</name>
+        <jar>/app/spark/wordcount.py</jar>
+        <spark-opts>--num-executors 1 --driver-memory 512m --executor-memory 512m --executor-cores 1</spark-opts>
+    </spark-attributes>
+
+    <retry policy="periodic" delay="minutes(3)" attempts="3"/>
+
+</process>