Posted to commits@falcon.apache.org by pa...@apache.org on 2016/03/01 08:24:15 UTC
[1/7] falcon git commit: Removing addons/ non-docs directory from asf-site branch
Repository: falcon
Updated Branches:
refs/heads/asf-site 8609ffd6f -> 6f5b476cc
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties
deleted file mode 100644
index 8d00bb5..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure.properties
+++ /dev/null
@@ -1,108 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##### NOTE: This is a TEMPLATE file which can be copied and edited
-
-##### Recipe properties
-falcon.recipe.name=hive-disaster-recovery
-
-
-##### Workflow properties
-falcon.recipe.workflow.name=hive-dr-workflow
-# Provide the absolute workflow path. This can be an HDFS or local FS path. If the workflow is on the local FS, it will be copied to HDFS
-falcon.recipe.workflow.path=/recipes/hive-replication/hive-disaster-recovery-secure-workflow.xml
-
-##### Cluster properties
-
-# Change the name of the cluster where the replication job should run here
-falcon.recipe.cluster.name=backupCluster
-# Change the cluster HDFS write endpoint here. This is mandatory.
-falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://localhost:8020
-# Change the cluster validity start time here
-falcon.recipe.cluster.validity.start=2014-10-01T00:00Z
-# Change the cluster validity end time here
-falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
-# Change the cluster NameNode Kerberos principal here. This is mandatory on secure clusters.
-falcon.recipe.nn.principal=nn/_HOST@EXAMPLE.COM
-
-##### Scheduling properties
-
-# Change the process frequency here. Valid frequency types are minutes, hours, days, months
-falcon.recipe.process.frequency=minutes(60)
-
-##### Retry policy properties
-
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-
-##### Tag properties - An optional comma-separated list of key=value tag pairs
-##### Uncomment to add tags
-#falcon.recipe.tags=owner=landing,pipeline=adtech
-
-##### ACL properties - Uncomment and change ACL if authorization is enabled
-
-#falcon.recipe.acl.owner=testuser
-#falcon.recipe.acl.group=group
-#falcon.recipe.acl.permission=0x755
-
-##### Custom Job properties
-
-##### Source Cluster DR properties
-sourceCluster=primaryCluster
-sourceMetastoreUri=thrift://localhost:9083
-sourceHiveServer2Uri=hive2://localhost:10000
-# For DB-level replication, specify a comma-separated list of databases to replicate multiple databases
-sourceDatabase=default
-# For DB-level replication, specify * for sourceTable.
-# For table-level replication, specify a comma-separated list of tables to replicate multiple tables
-sourceTable=testtable_dr
-## Please specify the staging directory on the source cluster without the fully qualified domain name.
-sourceStagingPath=/apps/hive/tools/dr
-sourceNN=hdfs://localhost:8020
-# Specify the Kerberos principals required to access the source NameNode and Hive servers; optional on non-secure clusters.
-sourceNNKerberosPrincipal=nn/_HOST@EXAMPLE.COM
-sourceHiveMetastoreKerberosPrincipal=hive/_HOST@EXAMPLE.COM
-sourceHive2KerberosPrincipal=hive/_HOST@EXAMPLE.COM
-
-##### Target Cluster DR properties
-targetCluster=backupCluster
-targetMetastoreUri=thrift://localhost:9083
-targetHiveServer2Uri=hive2://localhost:10000
-## Please specify the staging directory on the target cluster without the fully qualified domain name.
-targetStagingPath=/apps/hive/tools/dr
-targetNN=hdfs://localhost:8020
-# Specify the Kerberos principals required to access the target NameNode and Hive servers; optional on non-secure clusters.
-targetNNKerberosPrincipal=nn/_HOST@EXAMPLE.COM
-targetHiveMetastoreKerberosPrincipal=hive/_HOST@EXAMPLE.COM
-targetHive2KerberosPrincipal=hive/_HOST@EXAMPLE.COM
-
-# Caps the maximum number of events processed each time the job runs. Set it based on your bandwidth limit.
-# Setting it to -1 will process all events but can saturate the bandwidth. Use it judiciously!
-maxEvents=-1
-# Change it to specify the maximum number of mappers for replication
-replicationMaxMaps=5
-# Change it to specify the maximum number of mappers for DistCp
-distcpMaxMaps=1
-# Change it to specify the bandwidth in MB/s for each DistCp mapper
-distcpMapBandwidth=100
-
-##### Email Notification for Falcon instance completion
-falcon.recipe.notification.type=email
-falcon.recipe.notification.receivers=NA
\ No newline at end of file
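The properties above are plain java.util.Properties key/value pairs. As a minimal sketch of a pre-flight check before submitting an edited copy -- the class below is illustrative only, not part of Falcon, and merely verifies the keys the comments above mark as mandatory on a secure cluster:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Properties;

    public final class RecipePropertiesCheck {
        public static void main(String[] args) throws IOException {
            Properties props = new Properties();
            try (FileInputStream in = new FileInputStream(args[0])) {
                props.load(in);
            }
            // Keys flagged as mandatory in the comments of the secure recipe file.
            String[] mandatory = {
                    "falcon.recipe.cluster.hdfs.writeEndPoint",
                    "falcon.recipe.nn.principal",
            };
            for (String key : mandatory) {
                if (props.getProperty(key, "").trim().isEmpty()) {
                    throw new IllegalStateException("Missing mandatory property: " + key);
                }
            }
            System.out.println("Recipe properties look complete for: "
                    + props.getProperty("falcon.recipe.name"));
        }
    }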
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml
deleted file mode 100644
index f0de091..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-template.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<process name="##name##" xmlns="uri:falcon:process:0.1">
- <clusters>
- <!-- source -->
- <cluster name="##cluster.name##">
- <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
- </cluster>
- </clusters>
-
- <tags>_falcon_mirroring_type=HIVE</tags>
-
- <parallel>1</parallel>
- <!-- Replication needs to run only once to catch up -->
- <order>LAST_ONLY</order>
- <frequency>##process.frequency##</frequency>
- <timezone>UTC</timezone>
-
- <properties>
- <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
- </properties>
-
- <workflow name="##workflow.name##" engine="oozie"
- path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
- <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
- <notification type="##notification.type##" to="##notification.receivers##"/>
- <ACL/>
-</process>
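The ##...## tokens in the template above are placeholders filled from the matching falcon.recipe.* properties. A minimal sketch of that substitution, assuming simple string replacement (this mirrors the String.replace chain used by TableFeed.getEntityxml() later in this commit, not necessarily the recipe tool's actual mechanism):

    // template is the XML above read into a String; values come from the recipe properties.
    String process = template
            .replace("##name##", "hive-disaster-recovery")
            .replace("##cluster.name##", "backupCluster")
            .replace("##cluster.validity.start##", "2014-10-01T00:00Z")
            .replace("##cluster.validity.end##", "2016-12-30T00:00Z")
            .replace("##process.frequency##", "minutes(60)")
            .replace("##workflow.name##", "hive-dr-workflow");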
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml
deleted file mode 100644
index 296e049..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-workflow.xml
+++ /dev/null
@@ -1,249 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
- <start to='last-event'/>
- <action name="last-event">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp,hive,hive2,hcatalog</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-falconLibPath</arg>
- <arg>${wf:conf("falcon.libpath")}</arg>
- <arg>-sourceCluster</arg>
- <arg>${sourceCluster}</arg>
- <arg>-sourceMetastoreUri</arg>
- <arg>${sourceMetastoreUri}</arg>
- <arg>-sourceHiveServer2Uri</arg>
- <arg>${sourceHiveServer2Uri}</arg>
- <arg>-sourceDatabase</arg>
- <arg>${sourceDatabase}</arg>
- <arg>-sourceTable</arg>
- <arg>${sourceTable}</arg>
- <arg>-sourceStagingPath</arg>
- <arg>${sourceStagingPath}</arg>
- <arg>-sourceNN</arg>
- <arg>${sourceNN}</arg>
- <arg>-targetCluster</arg>
- <arg>${targetCluster}</arg>
- <arg>-targetMetastoreUri</arg>
- <arg>${targetMetastoreUri}</arg>
- <arg>-targetHiveServer2Uri</arg>
- <arg>${targetHiveServer2Uri}</arg>
- <arg>-targetStagingPath</arg>
- <arg>${targetStagingPath}</arg>
- <arg>-targetNN</arg>
- <arg>${targetNN}</arg>
- <arg>-maxEvents</arg>
- <arg>${maxEvents}</arg>
- <arg>-clusterForJobRun</arg>
- <arg>${clusterForJobRun}</arg>
- <arg>-clusterForJobRunWriteEP</arg>
- <arg>${clusterForJobRunWriteEP}</arg>
- <arg>-drJobName</arg>
- <arg>${drJobName}-${nominalTime}</arg>
- <arg>-executionStage</arg>
- <arg>lastevents</arg>
- </java>
- <ok to="export-dr-replication"/>
- <error to="fail"/>
- </action>
- <!-- Export Replication action -->
- <action name="export-dr-replication">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp,hive,hive2,hcatalog</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-falconLibPath</arg>
- <arg>${wf:conf("falcon.libpath")}</arg>
- <arg>-replicationMaxMaps</arg>
- <arg>${replicationMaxMaps}</arg>
- <arg>-distcpMaxMaps</arg>
- <arg>${distcpMaxMaps}</arg>
- <arg>-sourceCluster</arg>
- <arg>${sourceCluster}</arg>
- <arg>-sourceMetastoreUri</arg>
- <arg>${sourceMetastoreUri}</arg>
- <arg>-sourceHiveServer2Uri</arg>
- <arg>${sourceHiveServer2Uri}</arg>
- <arg>-sourceDatabase</arg>
- <arg>${sourceDatabase}</arg>
- <arg>-sourceTable</arg>
- <arg>${sourceTable}</arg>
- <arg>-sourceStagingPath</arg>
- <arg>${sourceStagingPath}</arg>
- <arg>-sourceNN</arg>
- <arg>${sourceNN}</arg>
- <arg>-targetCluster</arg>
- <arg>${targetCluster}</arg>
- <arg>-targetMetastoreUri</arg>
- <arg>${targetMetastoreUri}</arg>
- <arg>-targetHiveServer2Uri</arg>
- <arg>${targetHiveServer2Uri}</arg>
- <arg>-targetStagingPath</arg>
- <arg>${targetStagingPath}</arg>
- <arg>-targetNN</arg>
- <arg>${targetNN}</arg>
- <arg>-maxEvents</arg>
- <arg>${maxEvents}</arg>
- <arg>-distcpMapBandwidth</arg>
- <arg>${distcpMapBandwidth}</arg>
- <arg>-clusterForJobRun</arg>
- <arg>${clusterForJobRun}</arg>
- <arg>-clusterForJobRunWriteEP</arg>
- <arg>${clusterForJobRunWriteEP}</arg>
- <arg>-drJobName</arg>
- <arg>${drJobName}-${nominalTime}</arg>
- <arg>-executionStage</arg>
- <arg>export</arg>
- <arg>-counterLogDir</arg>
- <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}/</arg>
- </java>
- <ok to="import-dr-replication"/>
- <error to="fail"/>
- </action>
- <!-- Import Replication action -->
- <action name="import-dr-replication">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp,hive,hive2,hcatalog</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-falconLibPath</arg>
- <arg>${wf:conf("falcon.libpath")}</arg>
- <arg>-replicationMaxMaps</arg>
- <arg>${replicationMaxMaps}</arg>
- <arg>-distcpMaxMaps</arg>
- <arg>${distcpMaxMaps}</arg>
- <arg>-sourceCluster</arg>
- <arg>${sourceCluster}</arg>
- <arg>-sourceMetastoreUri</arg>
- <arg>${sourceMetastoreUri}</arg>
- <arg>-sourceHiveServer2Uri</arg>
- <arg>${sourceHiveServer2Uri}</arg>
- <arg>-sourceDatabase</arg>
- <arg>${sourceDatabase}</arg>
- <arg>-sourceTable</arg>
- <arg>${sourceTable}</arg>
- <arg>-sourceStagingPath</arg>
- <arg>${sourceStagingPath}</arg>
- <arg>-sourceNN</arg>
- <arg>${sourceNN}</arg>
- <arg>-targetCluster</arg>
- <arg>${targetCluster}</arg>
- <arg>-targetMetastoreUri</arg>
- <arg>${targetMetastoreUri}</arg>
- <arg>-targetHiveServer2Uri</arg>
- <arg>${targetHiveServer2Uri}</arg>
- <arg>-targetStagingPath</arg>
- <arg>${targetStagingPath}</arg>
- <arg>-targetNN</arg>
- <arg>${targetNN}</arg>
- <arg>-maxEvents</arg>
- <arg>${maxEvents}</arg>
- <arg>-distcpMapBandwidth</arg>
- <arg>${distcpMapBandwidth}</arg>
- <arg>-clusterForJobRun</arg>
- <arg>${clusterForJobRun}</arg>
- <arg>-clusterForJobRunWriteEP</arg>
- <arg>${clusterForJobRunWriteEP}</arg>
- <arg>-drJobName</arg>
- <arg>${drJobName}-${nominalTime}</arg>
- <arg>-executionStage</arg>
- <arg>import</arg>
- </java>
- <ok to="end"/>
- <error to="fail"/>
- </action>
- <kill name="fail">
- <message>
- Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
- </message>
- </kill>
- <end name="end"/>
-</workflow-app>
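The workflow above invokes org.apache.falcon.hive.HiveDRTool three times with nearly identical arguments, varying only the -executionStage value (lastevents, then export, then import), each action proceeding to the next on success and to the fail node on error. A sketch of how such an argument list could be assembled -- the helper below is hypothetical, not Falcon code, and lists only a subset of the flags:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    final class HiveDrArgs {
        // Builds the per-stage argument list; only "export" adds -counterLogDir,
        // matching the workflow above. Stage is "lastevents", "export" or "import".
        static List<String> forStage(Map<String, String> conf, String stage) {
            List<String> args = new ArrayList<>(Arrays.asList(
                    "-sourceCluster", conf.get("sourceCluster"),
                    "-targetCluster", conf.get("targetCluster"),
                    "-maxEvents", conf.get("maxEvents"),
                    "-executionStage", stage));
            if ("export".equals(stage)) {
                args.addAll(Arrays.asList("-counterLogDir", conf.get("logDir")));
            }
            return args;
        }
    }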
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties
deleted file mode 100644
index b14ec7c..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery.properties
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##### NOTE: This is a TEMPLATE file which can be copied and edited
-
-##### Recipe properties
-falcon.recipe.name=hive-disaster-recovery
-
-
-##### Workflow properties
-falcon.recipe.workflow.name=hive-dr-workflow
-# Provide the absolute workflow path. This can be an HDFS or local FS path. If the workflow is on the local FS, it will be copied to HDFS
-falcon.recipe.workflow.path=/recipes/hive-replication/hive-disaster-recovery-workflow.xml
-
-##### Cluster properties
-
-# Change the name of the cluster where the replication job should run here
-falcon.recipe.cluster.name=backupCluster
-# Change the cluster HDFS write endpoint here. This is mandatory.
-falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://localhost:8020
-# Change the cluster validity start time here
-falcon.recipe.cluster.validity.start=2014-10-01T00:00Z
-# Change the cluster validity end time here
-falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
-
-##### Scheduling properties
-
-# Change the process frequency here. Valid frequency types are minutes, hours, days, months
-falcon.recipe.process.frequency=minutes(60)
-
-##### Retry policy properties
-
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-
-##### Tag properties - An optional comma-separated list of key=value tag pairs
-##### Uncomment to add tags
-#falcon.recipe.tags=owner=landing,pipeline=adtech
-
-##### ACL properties - Uncomment and change ACL if authorization is enabled
-
-#falcon.recipe.acl.owner=testuser
-#falcon.recipe.acl.group=group
-#falcon.recipe.acl.permission=0x755
-
-##### Custom Job properties
-
-##### Source Cluster DR properties
-sourceCluster=primaryCluster
-sourceMetastoreUri=thrift://localhost:9083
-sourceHiveServer2Uri=hive2://localhost:10000
-# For DB-level replication, specify a comma-separated list of databases to replicate multiple databases
-sourceDatabase=default
-# For DB-level replication, specify * for sourceTable.
-# For table-level replication, specify a comma-separated list of tables to replicate multiple tables
-sourceTable=testtable_dr
-## Please specify the staging directory on the source cluster without the fully qualified domain name.
-sourceStagingPath=/apps/hive/tools/dr
-sourceNN=hdfs://localhost:8020
-
-##### Target Cluster DR properties
-targetCluster=backupCluster
-targetMetastoreUri=thrift://localhost:9083
-targetHiveServer2Uri=hive2://localhost:10000
-## Please specify the staging directory on the target cluster without the fully qualified domain name.
-targetStagingPath=/apps/hive/tools/dr
-targetNN=hdfs://localhost:8020
-
-# Caps the maximum number of events processed each time the job runs. Set it based on your bandwidth limit.
-# Setting it to -1 will process all events but can saturate the bandwidth. Use it judiciously!
-maxEvents=-1
-# Change it to specify the maximum number of mappers for replication
-replicationMaxMaps=5
-# Change it to specify the maximum number of mappers for DistCp
-distcpMaxMaps=1
-# Change it to specify the bandwidth in MB/s for each DistCp mapper
-distcpMapBandwidth=100
-
-##### Email Notification for Falcon instance completion
-falcon.recipe.notification.type=email
-falcon.recipe.notification.receivers=NA
\ No newline at end of file
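A quick sizing note on the DistCp knobs above, assuming the bandwidth limit is per mapper in MB/s: the aggregate copy rate is capped at roughly distcpMaxMaps x distcpMapBandwidth, i.e. 1 x 100 = ~100 MB/s with the defaults shown, and raising distcpMaxMaps to 4 at the same per-map limit would allow ~400 MB/s.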
[6/7] falcon git commit: Removing addons/ non-docs directory from asf-site branch
Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/TableFeed.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/TableFeed.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/TableFeed.java
deleted file mode 100644
index a3e11ef..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/TableFeed.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.falcon.adfservice.util.FSUtils;
-import org.apache.falcon.FalconException;
-
-import java.net.URISyntaxException;
-
-/**
- * Table Feed.
- */
-public class TableFeed extends Feed {
- private static final String TABLE_FEED_TEMPLATE_FILE = "table-feed.xml";
- private static final String TABLE_PARTITION_SEPARATOR = "#";
-
- private String tableName;
- private String partitions;
-
- public TableFeed(final Builder builder) {
- this.feedName = builder.tableFeedName;
- this.clusterName = builder.feedClusterName;
- this.frequency = builder.feedFrequency;
- this.startTime = builder.feedStartTime;
- this.endTime = builder.feedEndTime;
- this.tableName = builder.feedTableName;
- this.aclOwner = builder.feedAclOwner;
- this.partitions = builder.feedPartitions;
- }
-
- private String getTable() {
- return tableName + TABLE_PARTITION_SEPARATOR + partitions;
- }
-
- @Override
- public String getEntityxml() throws FalconException {
- try {
- String template = FSUtils.readHDFSFile(ADFJob.TEMPLATE_PATH_PREFIX, TABLE_FEED_TEMPLATE_FILE);
- return template.replace("$feedName$", feedName)
- .replace("$frequency$", frequency)
- .replace("$startTime$", startTime)
- .replace("$endTime$", endTime)
- .replace("$cluster$", clusterName)
- .replace("$table$", getTable())
- .replace("$aclowner$", aclOwner);
- } catch (URISyntaxException e) {
- throw new FalconException("Error when generating entity xml for table feed", e);
- }
- }
-
- /**
- * Builder for table Feed.
- */
- public static class Builder {
- private String tableFeedName;
- private String feedClusterName;
- private String feedFrequency;
- private String feedStartTime;
- private String feedEndTime;
- private String feedTableName;
- private String feedAclOwner;
- private String feedPartitions;
-
- public TableFeed build() {
- return new TableFeed(this);
- }
-
- public Builder withFeedName(final String feedName) {
- this.tableFeedName = feedName;
- return this;
- }
-
- public Builder withClusterName(final String clusterName) {
- this.feedClusterName = clusterName;
- return this;
- }
-
- public Builder withFrequency(final String frequency) {
- this.feedFrequency = frequency;
- return this;
- }
-
- public Builder withStartTime(final String startTime) {
- this.feedStartTime = startTime;
- return this;
- }
-
- public Builder withEndTime(final String endTime) {
- this.feedEndTime = endTime;
- return this;
- }
-
- public Builder withTableName(final String tableName) {
- this.feedTableName = tableName;
- return this;
- }
-
- public Builder withAclOwner(final String aclOwner) {
- this.feedAclOwner = aclOwner;
- return this;
- }
-
- public Builder withPartitions(final String partitions) {
- this.feedPartitions = partitions;
- return this;
- }
- }
-
-}
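TableFeed is constructed only through its Builder; a usage sketch (all values below are illustrative):

    // Build the feed and render its entity XML from the table-feed.xml template.
    TableFeed feed = new TableFeed.Builder()
            .withFeedName("adf-replicated-table")
            .withClusterName("primaryCluster")
            .withFrequency("hours(1)")
            .withStartTime("2016-01-01T00:00Z")
            .withEndTime("2016-12-31T00:00Z")
            .withTableName("default.testtable_dr")
            .withAclOwner("falcon")
            .withPartitions("ds=2016-01-01")
            .build();
    // getTable() joins table name and partitions with '#',
    // e.g. "default.testtable_dr#ds=2016-01-01".
    String entityXml = feed.getEntityxml();  // throws FalconException if the template read fails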
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/util/ADFJsonConstants.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/util/ADFJsonConstants.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/util/ADFJsonConstants.java
deleted file mode 100644
index 9e48685..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/util/ADFJsonConstants.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice.util;
-
-/**
- * ADF JSON Constants in ADF request.
- */
-public final class ADFJsonConstants {
-
- public static final String ADF_REQUEST_ACTIVITY = "activity";
- public static final String ADF_REQUEST_TRANSFORMATION = "transformation";
- public static final String ADF_REQUEST_TYPE = "type";
- public static final String ADF_REQUEST_JOBID = "jobId";
- public static final String ADF_REQUEST_START_TIME = "dataSliceStart";
- public static final String ADF_REQUEST_END_TIME = "dataSliceEnd";
- public static final String ADF_REQUEST_SCHEDULER = "scheduler";
- public static final String ADF_REQUEST_POLICY = "policy";
- public static final String ADF_REQUEST_TIMEOUT = "timeout";
- public static final String ADF_REQUEST_FREQUENCY = "frequency";
- public static final String ADF_REQUEST_INTERVAL = "interval";
- public static final String ADF_REQUEST_LINKED_SERVICES = "linkedServices";
- public static final String ADF_REQUEST_NAME = "name";
- public static final String ADF_REQUEST_INPUTS = "inputs";
- public static final String ADF_REQUEST_OUTPUTS = "outputs";
- public static final String ADF_REQUEST_TABLES = "tables";
- public static final String ADF_REQUEST_PROPERTIES = "properties";
- public static final String ADF_REQUEST_EXTENDED_PROPERTIES = "extendedProperties";
- public static final String ADF_REQUEST_CLUSTER_NAME = "clusterName";
- public static final String ADF_REQUEST_RUN_ON_BEHALF_USER = "runOnBehalf";
- public static final String ADF_REQUEST_LOCATION = "location";
- public static final String ADF_REQUEST_FOLDER_PATH = "folderPath";
- public static final String ADF_REQUEST_SCRIPT = "script";
- public static final String ADF_REQUEST_SCRIPT_PATH = "scriptPath";
- public static final String ADF_REQUEST_LINKED_SERVICE_NAME = "linkedServiceName";
- public static final String ADF_REQUEST_TABLE_NAME = "tableName";
- public static final String ADF_REQUEST_TABLE_PARTITION = "partitionedBy";
- public static final String ADF_REQUEST_LOCATION_TYPE_AZURE_BLOB = "AzureBlobLocation";
- public static final String ADF_REQUEST_CONNECTION_STRING = "connectionString";
- public static final String ADF_REQUEST_BLOB_ACCOUNT_NAME = "AccountName=";
-
- public static final String ADF_STATUS_PROTOCOL = "TransportProtocolVersion";
- public static final String ADF_STATUS_JOBID = "JobId";
- public static final String ADF_STATUS_STATUS = "Status";
- public static final String ADF_STATUS_PROGRESS = "Progress";
- public static final String ADF_STATUS_LOG_URL = "LogURL";
- public static final String ADF_STATUS_ERROR_TYPE = "ErrorType";
- public static final String ADF_STATUS_ERROR_MESSAGE = "ErrorMessage";
- public static final String ADF_STATUS_PROTOCOL_NAME = "2.0-preview";
- public static final String ADF_STATUS_ERROR_TYPE_VALUE = "UserError";
- public static final String ADF_STATUS_SUCCEEDED = "Succeeded";
- public static final String ADF_STATUS_FAILED = "Failed";
- public static final String ADF_STATUS_EXECUTING = "Executing";
- public static final String ADF_STATUS_CANCELED = "Canceled";
-
- private ADFJsonConstants() {
- }
-}
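These constants are the key names of ADF's JSON request/status protocol. A sketch of reading a couple of fields from a request body -- the org.json types here are purely illustrative (the ADF service may use a different JSON library), and the exact nesting is inferred from the constant names:

    import org.json.JSONObject;

    JSONObject request = new JSONObject(requestBody);
    String jobId = request.getString(ADFJsonConstants.ADF_REQUEST_JOBID);
    String type = request.getJSONObject(ADFJsonConstants.ADF_REQUEST_ACTIVITY)
            .getJSONObject(ADFJsonConstants.ADF_REQUEST_TRANSFORMATION)
            .getString(ADFJsonConstants.ADF_REQUEST_TYPE);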
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/util/FSUtils.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/util/FSUtils.java
deleted file mode 100644
index 58abfbf..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/util/FSUtils.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice.util;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.hadoop.HadoopClientFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.net.URISyntaxException;
-
-/**
- * Utility for file operations.
- */
-public final class FSUtils {
- private static final Logger LOG = LoggerFactory.getLogger(FSUtils.class);
- private FSUtils() {
- }
-
- public static String readHDFSFile(final String filePath, final String fileName)
- throws URISyntaxException, FalconException {
- BufferedReader br = null;
- try {
- Path path = new Path(filePath, fileName);
- FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(path.toUri());
- br = new BufferedReader(new InputStreamReader(fs.open(path)));
- StringBuilder fileContent = new StringBuilder();
- String line;
-            while ((line = br.readLine()) != null) {
-                fileContent.append(line);
-            }
- return fileContent.toString();
- } catch (IOException e) {
- throw new FalconException("Error reading file from hdfs: " + filePath + fileName + ": " + e.toString(), e);
- } finally {
- IOUtils.closeQuietly(br);
- }
- }
-
- public static String createFile(final Path path,
- final String content) throws FalconException {
- OutputStream out = null;
- try {
- FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(path.toUri());
- out = fs.create(path);
- out.write(content.getBytes());
- } catch (IOException e) {
- throw new FalconException("Error preparing script file: " + path, e);
- } finally {
- IOUtils.closeQuietly(out);
- }
- return path.toString();
- }
-
- public static void createDir(final Path dirPath) throws FalconException {
- FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(dirPath.toUri());
- try {
- if (!fs.exists(dirPath)) {
- LOG.info("Creating directory: {}", dirPath);
- HadoopClientFactory.mkdirsWithDefaultPerms(fs, dirPath);
- }
- } catch (IOException e) {
- throw new FalconException("Error creating directory: " + dirPath, e);
- }
- }
-
- public static void removeDir(final Path dirPath) throws FalconException {
- FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(dirPath.toUri());
- try {
- fs.delete(dirPath, true);
- } catch (IOException e) {
-            throw new FalconException("Error deleting directory: " + dirPath, e);
- }
- }
-}
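A usage sketch for the helpers above (paths are made up; each call goes through HadoopClientFactory's proxied FileSystem):

    import org.apache.falcon.FalconException;
    import org.apache.hadoop.fs.Path;

    Path scriptDir = new Path("/apps/falcon/adf/scripts");
    FSUtils.createDir(scriptDir);                                    // creates only if absent
    FSUtils.createFile(new Path(scriptDir, "job.hql"), "SELECT 1;");
    // readHDFSFile concatenates lines without separators and throws
    // URISyntaxException/FalconException on failure.
    String content = FSUtils.readHDFSFile("/apps/falcon/adf/scripts", "job.hql");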
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/actions/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/actions/pom.xml b/addons/designer/actions/pom.xml
deleted file mode 100644
index 64f5ee3..0000000
--- a/addons/designer/actions/pom.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- </parent>
- <artifactId>designer-action</artifactId>
- <description>Apache Falcon Pipeline Designer - Action Module</description>
- <name>Apache Falcon Designer Action</name>
- <packaging>jar</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-core</artifactId>
- </dependency>
-</dependencies>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/actions/src/main/java/org/apache/falcon/designer/action/configuration/EmailActionConfiguration.java
----------------------------------------------------------------------
diff --git a/addons/designer/actions/src/main/java/org/apache/falcon/designer/action/configuration/EmailActionConfiguration.java b/addons/designer/actions/src/main/java/org/apache/falcon/designer/action/configuration/EmailActionConfiguration.java
deleted file mode 100644
index fd37a49..0000000
--- a/addons/designer/actions/src/main/java/org/apache/falcon/designer/action/configuration/EmailActionConfiguration.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.action.configuration;
-
-import org.apache.falcon.designer.configuration.ActionConfiguration;
-/**
- * Action object holding Email data.
- */
-public class EmailActionConfiguration extends ActionConfiguration<EmailActionConfiguration> {
-
- private String to;
- private String cc;
- private String subject;
- private String body;
-
- private static final String NAME = "Email";
-
- public String getTo() {
- return to;
- }
-
- public void setTo(String to) {
- this.to = to;
- }
-
- public String getCc() {
- return cc;
- }
-
- public void setCc(String cc) {
- this.cc = cc;
- }
-
- public String getSubject() {
- return subject;
- }
-
- public void setSubject(String subject) {
- this.subject = subject;
- }
-
- public String getBody() {
- return body;
- }
-
- public void setBody(String body) {
- this.body = body;
- }
-
- @Override
- public String getName() {
- return NAME;
- }
-
- @Override
- public Class<EmailActionConfiguration> getConfigClass() {
- return EmailActionConfiguration.class;
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/actions/src/main/java/org/apache/falcon/designer/primitive/action/EmailAction.java
----------------------------------------------------------------------
diff --git a/addons/designer/actions/src/main/java/org/apache/falcon/designer/primitive/action/EmailAction.java b/addons/designer/actions/src/main/java/org/apache/falcon/designer/primitive/action/EmailAction.java
deleted file mode 100644
index 1bd1197..0000000
--- a/addons/designer/actions/src/main/java/org/apache/falcon/designer/primitive/action/EmailAction.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.primitive.action;
-
-import org.apache.falcon.designer.action.configuration.EmailActionConfiguration;
-import org.apache.falcon.designer.primitive.Action;
-import org.apache.falcon.designer.primitive.Code;
-import org.apache.falcon.designer.primitive.Message;
-
-/**
- * EmailAction Primitive containing implementation to compile.
- */
-public class EmailAction extends Action<EmailAction, EmailActionConfiguration> {
-
- private String nameSpace;
- private String entity;
-
- private EmailActionConfiguration emailConfig;
-
-    public EmailAction(EmailActionConfiguration config, String nameSpace, String entity) {
- this.emailConfig = config;
- this.nameSpace = nameSpace;
- this.entity = entity;
- }
-
- @Override
- public EmailActionConfiguration getConfiguration() {
- return emailConfig;
- }
-
- @Override
- public void setConfiguration(EmailActionConfiguration config) {
- this.emailConfig = config;
-
- }
-
- @Override
- public boolean hasOutput() {
- // TODO Auto-generated method stub
- return false;
- }
-
- @Override
- protected EmailAction copy() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Iterable<Message> validate() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- protected Code doCompile() {
- getConfiguration().getBody();
- return null;
- }
-
- @Override
- protected EmailAction doOptimize() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public String getNamespace() {
- return nameSpace;
- }
-
- @Override
- public String getEntity() {
- return entity;
- }
-
-}
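A usage sketch tying the two designer classes together (values are illustrative; note that copy(), validate(), doCompile() and doOptimize() are still TODO stubs above):

    EmailActionConfiguration cfg = new EmailActionConfiguration();
    cfg.setTo("oncall@example.com");
    cfg.setCc("team@example.com");
    cfg.setSubject("Pipeline finished");
    cfg.setBody("All instances succeeded.");

    // nameSpace and entity are opaque identifiers to this class.
    EmailAction action = new EmailAction(cfg, "default", "my-pipeline");
    assert "Email".equals(cfg.getName());
    assert !action.hasOutput();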
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/checkstyle/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/checkstyle/pom.xml b/addons/designer/checkstyle/pom.xml
deleted file mode 100644
index 5513685..0000000
--- a/addons/designer/checkstyle/pom.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>checkstyle</artifactId>
- <version>0.6-SNAPSHOT</version>
- <name>Apache Falcon Pipeline Designer Checkstyle</name>
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-java-header.txt
----------------------------------------------------------------------
diff --git a/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-java-header.txt b/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-java-header.txt
deleted file mode 100644
index 5d5f1e3..0000000
--- a/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-java-header.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-noframes.xsl
----------------------------------------------------------------------
diff --git a/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-noframes.xsl b/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-noframes.xsl
deleted file mode 100644
index 6308ef8..0000000
--- a/addons/designer/checkstyle/src/main/resources/falcon/checkstyle-noframes.xsl
+++ /dev/null
@@ -1,218 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-
- <xsl:output method="html" indent="yes"/>
- <xsl:decimal-format decimal-separator="." grouping-separator=","/>
-
- <xsl:key name="files" match="file" use="@name"/>
-
- <!-- Checkstyle XML Style Sheet by Stephane Bailliez <sb...@apache.org> -->
- <!-- Part of the Checkstyle distribution found at http://checkstyle.sourceforge.net -->
- <!-- Usage (generates checkstyle_report.html): -->
- <!-- <checkstyle failonviolation="false" config="${check.config}"> -->
- <!-- <fileset dir="${src.dir}" includes="**/*.java"/> -->
- <!-- <formatter type="xml" toFile="${doc.dir}/checkstyle_report.xml"/> -->
- <!-- </checkstyle> -->
- <!-- <style basedir="${doc.dir}" destdir="${doc.dir}" -->
- <!-- includes="checkstyle_report.xml" -->
- <!-- style="${doc.dir}/checkstyle-noframes-sorted.xsl"/> -->
-
- <xsl:template match="checkstyle">
- <html>
- <head>
- <style type="text/css">
- .bannercell {
- border: 0px;
- padding: 0px;
- }
- body {
- margin-left: 10;
- margin-right: 10;
- font:normal 80% arial,helvetica,sanserif;
- background-color:#FFFFFF;
- color:#000000;
- }
- .a td {
- background: #efefef;
- }
- .b td {
- background: #fff;
- }
- th, td {
- text-align: left;
- vertical-align: top;
- }
- th {
- font-weight:bold;
- background: #ccc;
- color: black;
- }
- table, th, td {
- font-size:100%;
- border: none
- }
- table.log tr td, tr th {
-
- }
- h2 {
- font-weight:bold;
- font-size:140%;
- margin-bottom: 5;
- }
- h3 {
- font-size:100%;
- font-weight:bold;
- background: #525D76;
- color: white;
- text-decoration: none;
- padding: 5px;
- margin-right: 2px;
- margin-left: 2px;
- margin-bottom: 0;
- }
- </style>
- </head>
- <body>
- <a name="top"></a>
- <!-- jakarta logo -->
- <table border="0" cellpadding="0" cellspacing="0" width="100%">
- <tr>
- <td class="bannercell" rowspan="2">
- <!--a href="http://jakarta.apache.org/">
- <img src="http://jakarta.apache.org/images/jakarta-logo.gif" alt="http://jakarta.apache.org" align="left" border="0"/>
- </a-->
- </td>
- <td class="text-align:right">
- <h2>CheckStyle Audit</h2>
- </td>
- </tr>
- <tr>
- <td class="text-align:right">Designed for use with
- <a href='http://checkstyle.sourceforge.net/'>CheckStyle</a>
-                        and <a href='http://jakarta.apache.org'>Ant</a>.
- </td>
- </tr>
- </table>
- <hr size="1"/>
-
- <!-- Summary part -->
- <xsl:apply-templates select="." mode="summary"/>
- <hr size="1" width="100%" align="left"/>
-
- <!-- Package List part -->
- <xsl:apply-templates select="." mode="filelist"/>
- <hr size="1" width="100%" align="left"/>
-
- <!-- For each package create its part -->
- <xsl:apply-templates select="file[@name and generate-id(.) = generate-id(key('files', @name))]"/>
-
- <hr size="1" width="100%" align="left"/>
-
-
- </body>
- </html>
- </xsl:template>
-
-
- <xsl:template match="checkstyle" mode="filelist">
- <h3>Files</h3>
- <table class="log" border="0" cellpadding="5" cellspacing="2" width="100%">
- <tr>
- <th>Name</th>
- <th>Errors</th>
- </tr>
- <xsl:for-each select="file[@name and generate-id(.) = generate-id(key('files', @name))]">
- <xsl:sort data-type="number" order="descending" select="count(key('files', @name)/error)"/>
- <xsl:variable name="errorCount" select="count(error)"/>
- <tr>
- <xsl:call-template name="alternated-row"/>
- <td>
- <a href="#f-{@name}">
- <xsl:value-of select="@name"/>
- </a>
- </td>
- <td>
- <xsl:value-of select="$errorCount"/>
- </td>
- </tr>
- </xsl:for-each>
- </table>
- </xsl:template>
-
-
- <xsl:template match="file">
- <a name="f-{@name}"></a>
- <h3>File
- <xsl:value-of select="@name"/>
- </h3>
-
- <table class="log" border="0" cellpadding="5" cellspacing="2" width="100%">
- <tr>
- <th>Error Description</th>
- <th>Line</th>
- </tr>
- <xsl:for-each select="key('files', @name)/error">
- <xsl:sort data-type="number" order="ascending" select="@line"/>
- <tr>
- <xsl:call-template name="alternated-row"/>
- <td>
- <xsl:value-of select="@message"/>
- </td>
- <td>
- <xsl:value-of select="@line"/>
- </td>
- </tr>
- </xsl:for-each>
- </table>
- <a href="#top">Back to top</a>
- </xsl:template>
-
-
- <xsl:template match="checkstyle" mode="summary">
- <h3>Summary</h3>
- <xsl:variable name="fileCount"
- select="count(file[@name and generate-id(.) = generate-id(key('files', @name))])"/>
- <xsl:variable name="errorCount" select="count(file/error)"/>
- <table class="log" border="0" cellpadding="5" cellspacing="2" width="100%">
- <tr>
- <th>Files</th>
- <th>Errors</th>
- </tr>
- <tr>
- <xsl:call-template name="alternated-row"/>
- <td>
- <xsl:value-of select="$fileCount"/>
- </td>
- <td>
- <xsl:value-of select="$errorCount"/>
- </td>
- </tr>
- </table>
- </xsl:template>
-
- <xsl:template name="alternated-row">
- <xsl:attribute name="class">
- <xsl:if test="position() mod 2 = 1">a</xsl:if>
- <xsl:if test="position() mod 2 = 0">b</xsl:if>
- </xsl:attribute>
- </xsl:template>
-</xsl:stylesheet>
-
-
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/checkstyle/src/main/resources/falcon/checkstyle.xml
----------------------------------------------------------------------
diff --git a/addons/designer/checkstyle/src/main/resources/falcon/checkstyle.xml b/addons/designer/checkstyle/src/main/resources/falcon/checkstyle.xml
deleted file mode 100644
index 9e18299..0000000
--- a/addons/designer/checkstyle/src/main/resources/falcon/checkstyle.xml
+++ /dev/null
@@ -1,233 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<!DOCTYPE module PUBLIC
- "-//Puppy Crawl//DTD Check Configuration 1.2//EN"
- "http://www.puppycrawl.com/dtds/configuration_1_2.dtd">
-
-<!--
-
- Checkstyle configuration for Falcon that is based on the sun_checks.xml file
- that is bundled with Checkstyle and includes checks for:
-
- - the Java Language Specification at
- http://java.sun.com/docs/books/jls/second_edition/html/index.html
-
- - the Sun Code Conventions at http://java.sun.com/docs/codeconv/
-
- - the Javadoc guidelines at
- http://java.sun.com/j2se/javadoc/writingdoccomments/index.html
-
- - the JDK Api documentation http://java.sun.com/j2se/docs/api/index.html
-
- - some best practices
-
- Checkstyle is very configurable. Be sure to read the documentation at
- http://checkstyle.sf.net (or in your downloaded distribution).
-
- Most Checks are configurable, be sure to consult the documentation.
-  Most Checks are configurable; be sure to consult the documentation.
- To completely disable a check, just comment it out or delete it from the file.
-
- Finally, it is worth reading the documentation.
-
--->
-
-<module name="Checker">
-
- <!-- Checks that a package.html file exists for each package. -->
- <!-- See http://checkstyle.sf.net/config_javadoc.html#PackageHtml -->
- <!-- module name="PackageHtml"/ -->
-
- <!-- Checks whether files end with a new line. -->
- <!-- See http://checkstyle.sf.net/config_misc.html#NewlineAtEndOfFile -->
- <module name="NewlineAtEndOfFile"/>
-
- <!-- Checks for Headers -->
- <!-- See http://checkstyle.sf.net/config_header.html -->
- <module name="Header">
- <property name="headerFile" value="checkstyle/src/main/resources/falcon/checkstyle-java-header.txt"/>
- </module>
-
- <module name="FileLength"/>
- <module name="FileTabCharacter"/>
-
- <module name="TreeWalker">
- <!-- Checks for Javadoc comments. -->
- <!-- See http://checkstyle.sf.net/config_javadoc.html -->
- <module name="JavadocType">
- <property name="scope" value="public"/>
- <property name="allowMissingParamTags" value="true"/>
- </module>
- <module name="JavadocStyle"/>
-
- <module name="SuperClone"/>
- <module name="SuperFinalize"/>
-
- <!-- Checks for Naming Conventions. -->
- <!-- See http://checkstyle.sf.net/config_naming.html -->
- <module name="ConstantName"/>
- <module name="ClassTypeParameterName">
- <property name="format" value="^[A-Z]+$"/>
- </module>
- <module name="LocalFinalVariableName">
- <!--<property name="format" value="^[A-Z][_A-Z0-9]*$"/>-->
- </module>
- <module name="LocalVariableName"/>
- <module name="MemberName"/>
- <module name="MethodName"/>
- <module name="MethodTypeParameterName">
- <property name="format" value="^[A-Z]+$"/>
- </module>
- <module name="PackageName"/>
- <module name="ParameterName"/>
- <module name="StaticVariableName"/>
- <module name="TypeName"/>
-
- <!-- Checks for imports -->
- <!-- See http://checkstyle.sf.net/config_import.html -->
- <module name="IllegalImport"/>
- <!-- defaults to sun.* packages -->
- <module name="RedundantImport"/>
- <module name="UnusedImports"/>
-
-
- <!-- Checks for Size Violations. -->
- <!-- See http://checkstyle.sf.net/config_sizes.html -->
- <module name="LineLength">
- <property name="max" value="120"/>
- </module>
- <module name="MethodLength"/>
- <module name="ParameterNumber"/>
- <module name="OuterTypeNumber"/>
-
- <!-- Checks for whitespace -->
- <!-- See http://checkstyle.sf.net/config_whitespace.html -->
- <module name="GenericWhitespace"/>
- <module name="EmptyForIteratorPad"/>
- <module name="MethodParamPad"/>
- <module name="WhitespaceAround">
- <property name="tokens" value="LITERAL_IF"/>
- </module>
- <module name="NoWhitespaceAfter">
- <property name="tokens"
- value="BNOT, DEC, DOT, INC, LNOT, UNARY_MINUS, UNARY_PLUS"/>
- </module>
- <module name="NoWhitespaceBefore"/>
- <module name="OperatorWrap"/>
- <module name="ParenPad"/>
- <module name="TypecastParenPad"/>
- <module name="WhitespaceAfter">
- <property name="tokens" value="COMMA, SEMI"/>
- </module>
-
- <module name="Regexp">
- <property name="format" value="[ \t]+$"/>
- <property name="illegalPattern" value="true"/>
- <property name="message" value="Trailing whitespace"/>
- </module>
-
- <!-- Modifier Checks -->
- <!-- See http://checkstyle.sf.net/config_modifiers.html -->
- <module name="ModifierOrder"/>
- <module name="RedundantModifier"/>
-
-
- <!-- Checks for blocks. You know, those {}'s -->
- <!-- See http://checkstyle.sf.net/config_blocks.html -->
- <module name="AvoidNestedBlocks"/>
- <module name="EmptyBlock">
- <!-- catch blocks need a statement or a comment. -->
- <property name="option" value="text"/>
- <property name="tokens" value="LITERAL_CATCH"/>
- </module>
- <module name="EmptyBlock">
- <!-- all other blocks need a real statement. -->
- <property name="option" value="stmt"/>
- <property name="tokens" value="LITERAL_DO, LITERAL_ELSE, LITERAL_FINALLY,
- LITERAL_IF, LITERAL_FOR, LITERAL_TRY, LITERAL_WHILE, INSTANCE_INIT,
- STATIC_INIT"/>
- </module>
- <module name="LeftCurly"/>
- <module name="NeedBraces"/>
- <module name="RightCurly"/>
-
-
- <!-- Checks for common coding problems -->
- <!-- See http://checkstyle.sf.net/config_coding.html -->
- <!-- module name="AvoidInlineConditionals"/-->
- <module name="DoubleCheckedLocking"/>
- <module name="EmptyStatement"/>
- <module name="EqualsHashCode"/>
- <module name="StringLiteralEquality"/>
- <module name="HiddenField">
- <property name="ignoreConstructorParameter" value="true"/>
- <property name="ignoreAbstractMethods" value="true"/>
- <property name="ignoreSetter" value="true"/>
- </module>
- <module name="IllegalInstantiation"/>
- <module name="InnerAssignment"/>
- <module name="MissingSwitchDefault"/>
- <module name="RedundantThrows"/>
- <module name="SimplifyBooleanExpression"/>
- <module name="SimplifyBooleanReturn"/>
- <module name="DefaultComesLast"/>
-
- <!-- Checks for class design -->
- <!-- See http://checkstyle.sf.net/config_design.html -->
- <module name="FinalClass"/>
- <module name="HideUtilityClassConstructor"/>
- <module name="InterfaceIsType"/>
- <module name="VisibilityModifier">
- <property name="protectedAllowed" value="true"/>
- </module>
- <module name="MissingOverride"/>
-
-
- <!-- Miscellaneous other checks. -->
- <!-- See http://checkstyle.sf.net/config_misc.html -->
- <module name="ArrayTypeStyle"/>
- <module name="ArrayTrailingComma"/>
- <!--
- This generates too many false-positives on wrapped 'throws' clauses
- to be really useful. Disabled for now.
-
- Falcon style is:
- * Spaces, not tabs.
- * Indent by four spaces.
- * Indent by four spaces when wrapping a line.
- -->
- <module name="Indentation">
- <property name="basicOffset" value="4"/>
- <property name="caseIndent" value="0"/>
- </module>
- <module name="TodoComment"/>
- <module name="UpperEll"/>
-
- <module name="FileContentsHolder"/>
- </module>
-
- <!-- allow warnings to be suppressed -->
- <module name="SuppressionCommentFilter">
- <property name="offCommentFormat" value="SUSPEND CHECKSTYLE CHECK ParameterNumberCheck|VisibilityModifierCheck|HiddenFieldCheck|MethodName"/>
- <property name="onCommentFormat" value="RESUME CHECKSTYLE CHECK ParameterNumberCheck|VisibilityModifierCheck|HiddenFieldCheck|MethodName"/>
- <property name="checkFormat" value="ParameterNumberCheck|VisibilityModifierCheck|HiddenFieldCheck|MethodName"/>
- </module>
-
-</module>
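
The SuppressionCommentFilter above lets a developer switch the listed checks off and on around a block of source. A minimal sketch of how those comments would look in Java (the method and its parameters are hypothetical):

    // SUSPEND CHECKSTYLE CHECK ParameterNumberCheck
    public void configureJob(String srcCluster, String tgtCluster, String srcPath,
                             String tgtPath, String frequency, String retryPolicy,
                             String retryDelay, int retryAttempts) {
        // eight parameters would normally trip ParameterNumberCheck (default max is 7)
    }
    // RESUME CHECKSTYLE CHECK ParameterNumberCheck
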
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/checkstyle/src/main/resources/falcon/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/addons/designer/checkstyle/src/main/resources/falcon/findbugs-exclude.xml b/addons/designer/checkstyle/src/main/resources/falcon/findbugs-exclude.xml
deleted file mode 100644
index 0a7580d..0000000
--- a/addons/designer/checkstyle/src/main/resources/falcon/findbugs-exclude.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<FindBugsFilter>
- <!-- These are generated by xjc compiler and hence excluded. -->
- <Match>
- <Or>
- <Class name="~org.apache.falcon.entity.v0.feed.Validity" />
- <Class name="~org.apache.falcon.entity.v0.process.Validity" />
- </Or>
- </Match>
-
- <!--
- Disable encoding as this might give an impression that Falcon code base is
- "Internationalization" ready, but we haven't done anything consciously to guarantee that.
- -->
- <Match>
- <Bug pattern="DM_DEFAULT_ENCODING" />
- </Match>
-</FindBugsFilter>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/common/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/common/pom.xml b/addons/designer/common/pom.xml
deleted file mode 100644
index d59a376..0000000
--- a/addons/designer/common/pom.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- </parent>
- <artifactId>designer-common</artifactId>
- <description>Apache Falcon Pipeline Designer - Common Module</description>
- <name>Apache Falcon Designer Common</name>
- <packaging>jar</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
- </dependencies>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/core/pom.xml b/addons/designer/core/pom.xml
deleted file mode 100644
index 9833de0..0000000
--- a/addons/designer/core/pom.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- </parent>
- <artifactId>designer-core</artifactId>
- <description>Apache Falcon Pipeline Designer - Core Module</description>
- <name>Apache Falcon Designer Core</name>
- <packaging>jar</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-client</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
-
- <dependency>
- <groupId>net.sourceforge.findbugs</groupId>
- <artifactId>jsr305</artifactId>
- </dependency>
-
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- <scope>test</scope>
- <classifier>tests</classifier>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <scope>test</scope>
- <classifier>tests</classifier>
- </dependency>
- </dependencies>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/ActionConfiguration.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/ActionConfiguration.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/ActionConfiguration.java
deleted file mode 100644
index 884f459..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/ActionConfiguration.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.configuration;
-
-/**
- * Abstract ActionConfiguration extending Configuration.
- */
-public abstract class ActionConfiguration<A extends ActionConfiguration> extends Configuration<A> {
-
- private static final String CATEGORY = "ACTION";
-
- @Override
- public String getCategory() {
- return CATEGORY;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/Configuration.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/Configuration.java
deleted file mode 100644
index 30c0314..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/Configuration.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.configuration;
-
-import java.io.IOException;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-
-/**
- * This is the configuration that primitives will use to configure their
- * instance.
- */
-public abstract class Configuration<T extends Configuration> {
- private static final ObjectMapper MAPPER = new ObjectMapper();
-
- public abstract String getName();
-
- public abstract String getCategory();
-
- /**
- * Serializes this configuration into a JSON string.
- * @return JSON representation of this configuration
- * @throws SerdeException if serialization fails
- */
- public String serialize() throws SerdeException {
- String returnJsonString;
- try {
- returnJsonString = MAPPER.writeValueAsString(this);
- } catch (JsonGenerationException e) {
- throw new SerdeException("Failed serializing object ", e);
- } catch (JsonMappingException e) {
- throw new SerdeException("Failed serializing object ", e);
- } catch (IOException e) {
- throw new SerdeException("Failed serializing object ", e);
- }
- return returnJsonString;
- }
-
- /**
- * Deserializes a JSON string into a configuration object of the concrete type.
- * @param actString
- * serialized JSON representation of the configuration
- * @return the deserialized configuration object
- * @throws SerdeException if deserialization fails
- */
- public T deserialize(String actString) throws SerdeException {
- T returnConfig;
- try {
- returnConfig = MAPPER.readValue(actString, getConfigClass());
- } catch (JsonParseException e) {
- throw new SerdeException("Failed deserialize string " + actString,
- e);
- } catch (JsonMappingException e) {
- throw new SerdeException("Failed deserialize string " + actString,
- e);
- } catch (IOException e) {
- throw new SerdeException("Failed deserialize string " + actString,
- e);
- }
- return returnConfig;
- }
-
- public abstract Class<T> getConfigClass();
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/FlowConfig.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/FlowConfig.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/FlowConfig.java
deleted file mode 100644
index ba7d074..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/FlowConfig.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.configuration;
-
-/**
- * Flow data. More parameters would need to be added.
- */
-public class FlowConfig extends Configuration<FlowConfig> {
-
- private String namespace;
- private String entity;
- private String name;
- private static final String CATEGORY = "FLOW";
-
- public FlowConfig(String namespace, String entity, String name) {
- this.namespace = namespace;
- this.entity = entity;
- this.name = name;
- }
-
- @Override
- public String getCategory() {
- return CATEGORY;
- }
-
- @Override
- public String getName() {
- return name;
- }
-
- public String getNamespace() {
- return namespace;
- }
-
- public void setNamespace(String namespace) {
- this.namespace = namespace;
- }
-
- public String getEntity() {
- return entity;
- }
-
- public void setEntity(String entity) {
- this.entity = entity;
- }
-
- @Override
- public Class<FlowConfig> getConfigClass() {
- return FlowConfig.class;
- }
-
-}
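
The Jackson-based serialize() inherited from Configuration can be exercised with the concrete FlowConfig above. A minimal sketch with hypothetical values; note that deserializing back into FlowConfig would additionally need a no-arg constructor or Jackson annotations, which the class as written does not provide:

    import org.apache.falcon.designer.configuration.FlowConfig;
    import org.apache.falcon.designer.configuration.SerdeException;

    public final class FlowConfigDemo {
        private FlowConfigDemo() {}

        public static void main(String[] args) throws SerdeException {
            FlowConfig flow = new FlowConfig("marketing", "clicks-feed", "daily-clicks-flow");
            // Writes the bean properties (namespace, entity, name, category) as a JSON string.
            String json = flow.serialize();
            System.out.println(json);
        }
    }
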
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/SerdeException.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/SerdeException.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/SerdeException.java
deleted file mode 100644
index 107180c..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/SerdeException.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.configuration;
-
-/**
- * Checked Exception that is thrown on serialization/deserialization of a config
- * object failure.
- */
-public class SerdeException extends Exception {
- /**
- * Constructs a default exception with no cause or message.
- */
- public SerdeException() {
- super();
- }
-
- /**
- * Constructs an exception with a specific message.
- * @param message
- * - Message on the exception
- */
- public SerdeException(String message) {
- super(message);
- }
-
- /**
- * Constructs an exception with a specific message and cause.
- * @param message
- * - Message on the exception
- * @param cause
- * - Underlying exception that resulted in this being thrown
- */
- public SerdeException(String message, Throwable cause) {
- super(message, cause);
- }
-
- /**
- * Constructs an exception with a cause and message is initialized to be
- * same as that of the cause.
- * @param cause
- * - Underlying exception that resulted in this being thrown
- */
- public SerdeException(Throwable cause) {
- super(cause);
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/TransformConfiguration.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/TransformConfiguration.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/TransformConfiguration.java
deleted file mode 100644
index 6e0fedb..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/configuration/TransformConfiguration.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.configuration;
-
-/**
- * Abstract TransformConfiguration extending Configuration.
- */
-public abstract class TransformConfiguration<A extends TransformConfiguration> extends Configuration<A> {
-
-
- private static final String CATEGORY = "TRANSFORM";
-
- @Override
- public String getCategory() {
- return CATEGORY;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Action.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Action.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Action.java
deleted file mode 100644
index e16c013..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Action.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.falcon.designer.configuration.ActionConfiguration;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-import java.net.URI;
-import java.util.Map;
-
-/**
- * Actions are components of flows. They encapsulate an atomic
- * execution unit in a flow. If an action is dependent on another
- * action, then the action won't be started unless the preceding
- * action is entirely complete.
- *
- * A few examples of Actions are:
- * 1. SSH Action, which executes a shell command on a remote host
- * 2. Data Transform Action, which executes a collection of data transformations
- * as a single logical unit
- *
- * There are no restrictions imposed on an Action with respect to the input
- * or the output such as in a Transformation.
- */
-public abstract class Action<A extends Action, B extends ActionConfiguration> extends Primitive<A, B> {
-
- protected static final String TYPE = "ACTION";
-
- @Nonnull
- public String getType() {
- return TYPE;
- }
-
- /**
- * Indicates whether this action has any output or not. If this function
- * returns true, then {@link Action#outputKeys()} and {@link Action#outputFile()}
- * need to be implemented by the concrete class.
- *
- * @return - True if the action has output that any downstream control or action
- * can consume, false otherwise.
- */
- public abstract boolean hasOutput();
-
- /**
- * These are the keys that the action chooses to expose to the external world, which
- * can either be used to manage the flow control or be used as an input in a subsequent
- * action within the context of a flow. Each key that may be output by this
- * action has to be declared along with its return type {@link java.sql.Types}.
- *
- * If an action output key isn't white listed through this with a specific type,
- * then the default type {@link java.sql.Types#VARCHAR} is assumed. All output keys
- * are nullable. If null, the flow control can assume default values, which may vary
- * depending on the data type (for ex: VARCHAR(""), NUMERIC(0), DATETIME(CURRENTTIME)).
- *
- * @return - Output key name and its corresponding data type. If the output key
- * doesn't conform to this data type, the flow may fail at runtime or may safely assume
- * a default value. The behavior is left to the implementation of the flow compiler
- * and scheduler.
- */
- @Nullable
- public Map<String, Integer> outputKeys() {
- if (hasOutput()) {
- throw new NotImplementedException(getClass() + "::outputKeys()");
- } else {
- return null;
- }
- }
-
- /**
- * Returns a file name as a URI that can be mapped to a generic hadoop file system
- * implementation.
- *
- * @return - Fully qualified file name uri as understood by the hadoop file system.
- */
- @Nullable
- public URI outputFile() {
- if (hasOutput()) {
- throw new NotImplementedException(getClass() + "::outputFile()");
- } else {
- return null;
- }
- }
-}
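
A hypothetical concrete Action, sketching how the output contract above fits together with the Primitive methods. Every name here is illustrative, and validation/compilation are stubbed out:

    import java.net.URI;
    import java.util.Collections;
    import java.util.Map;
    import org.apache.falcon.designer.configuration.ActionConfiguration;
    import org.apache.falcon.designer.primitive.Action;
    import org.apache.falcon.designer.primitive.Code;
    import org.apache.falcon.designer.primitive.Message;

    /** Illustrative SSH action exposing a single output key. */
    public class SshAction extends Action<SshAction, SshAction.SshActionConfiguration> {

        /** Minimal configuration for the sketch. */
        public static class SshActionConfiguration extends ActionConfiguration<SshActionConfiguration> {
            @Override public String getName() { return "ssh-action-config"; }
            @Override public Class<SshActionConfiguration> getConfigClass() {
                return SshActionConfiguration.class;
            }
        }

        private SshActionConfiguration config;

        @Override public void setConfiguration(SshActionConfiguration config) { this.config = config; }
        @Override public SshActionConfiguration getConfiguration() { return config; }
        @Override protected SshAction copy() {
            SshAction clone = new SshAction();
            clone.setConfiguration(config);
            return clone;
        }
        @Override public Iterable<Message> validate() { return null; }  // no findings in this sketch
        @Override protected Code doCompile() { return new Code() { }; } // Code is a marker interface
        @Override protected SshAction doOptimize() { return this; }
        @Override public String getNamespace() { return "designer"; }
        @Override public String getEntity() { return "ssh-action"; }

        @Override public boolean hasOutput() { return true; }
        @Override public Map<String, Integer> outputKeys() {
            // Declares the remote command's exit code, typed per java.sql.Types.
            return Collections.singletonMap("exitCode", java.sql.Types.INTEGER);
        }
        @Override public URI outputFile() {
            return URI.create("hdfs://localhost:8020/falcon/designer/ssh-action.out");
        }
    }
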
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Code.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Code.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Code.java
deleted file mode 100644
index 35eeeb1..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Code.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-/**
- * For now a simple marker interface for holding source code
- * corresponding to a primitive.
- */
-public interface Code {
- //TODO Details to be figured out as go along.
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/CompilationException.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/CompilationException.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/CompilationException.java
deleted file mode 100644
index 603225b..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/CompilationException.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-import java.util.Iterator;
-
-/**
- * Checked exception that is thrown when there are one or more
- * issues with the primitive that is being compiled.
- */
-public class CompilationException extends Exception implements Iterable<Message> {
-
- private final String detailedMessage;
-
- private final Iterable<Message> messages;
-
- /**
- * Construct default CompilationException with the messages (that
- * may typically be returned by the validation phase).
- *
- * @param validationMessages - Iterable messages.
- */
- public CompilationException(Iterable<Message> validationMessages) {
- StringBuilder buffer = new StringBuilder();
- for (Message message : validationMessages) {
- if (buffer.length() > 0) {
- buffer.append('\n');
- }
- buffer.append(message);
- }
- detailedMessage = buffer.toString();
- messages = validationMessages;
- }
-
- @Override
- public String getMessage() {
- return detailedMessage;
- }
-
-
- @Override
- public Iterator<Message> iterator() {
- return messages.iterator();
- }
-}
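
Since CompilationException is Iterable<Message>, a caller can walk the findings directly. A sketch of that consumption, with hypothetical class and method names:

    import org.apache.falcon.designer.primitive.CompilationException;
    import org.apache.falcon.designer.primitive.Message;
    import org.apache.falcon.designer.primitive.Primitive;

    final class CompileReporter {
        private CompileReporter() {}

        /** Compiles the given primitive and prints any validation findings. */
        static void compileAndReport(Primitive<?, ?> primitive) {
            try {
                primitive.compile();
            } catch (CompilationException ce) {
                for (Message m : ce) {  // the exception iterates its validation messages
                    System.err.println(m.getType() + ": " + m.getMessage());
                }
            }
        }
    }
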
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Message.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Message.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Message.java
deleted file mode 100644
index e5a68a8..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Message.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-/**
- * Messages are pieces of information that may be surfaced to the caller
- * by any of the operations on the primitives. For ex., compilation of a
- * transformation or flow may return some messages, some of which may be
- * serious errors or warnings.
- */
-public class Message {
-
- /**
- * Message type that each message is associated with.
- */
- public enum Type {ERROR, WARNING, INFORMATION}
-
- private final Type type;
- private final String message;
- private Object context;
-
- public Message(Type messageType, String messageText) {
- this.type = messageType;
- this.message = messageText;
- }
-
- public Type getType() {
- return type;
- }
-
- public String getMessage() {
- return message;
- }
-
- public Object getContext() {
- return context;
- }
-
- public void setContext(Object context) {
- this.context = context;
- }
-
- @Override
- public String toString() {
- return "Message{"
- + "type=" + type
- + ", message='" + message + '\''
- + ", context=" + context
- + '}';
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Primitive.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Primitive.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Primitive.java
deleted file mode 100644
index aa825a9..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Primitive.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import org.apache.falcon.designer.configuration.Configuration;
-import org.apache.falcon.designer.configuration.SerdeException;
-import org.apache.falcon.designer.storage.Storage;
-import org.apache.falcon.designer.storage.StorageException;
-import org.apache.falcon.designer.storage.Storeable;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-/**
- * All elements of the pipeline are essentially a primitive. These primitives
- * exist only during pipeline design time.
- */
-public abstract class Primitive<T extends Primitive, V extends Configuration>
- implements Storeable {
-
- public abstract void setConfiguration(V config);
-
- protected abstract T copy();
-
- @Nonnull
- public abstract V getConfiguration();
-
- /**
- * Perform a validation to see if the primitive configuration is consistent
- * with this primitive.
- * @return - Messages that need to be sent as feedback from the validation.
- * Returns null if there are no messages to return from the
- * validation. If at least one of the messages returned has a
- * message type of {@link Message.Type#ERROR},
- * the primitive cannot be compiled successfully.
- */
- @Nullable
- public abstract Iterable<Message> validate();
-
- /**
- * Compile the primitive and generate corresponding binary/source code The
- * compile method fails if there are one or more {@link Message.Type#ERROR}
- * messages from the validation phase.
- * @return - Code object generated for the primitive.
- * @throws CompilationException
- * - Compilation issues as returned by validate (if any of the
- * {@link Message} is of type {@link Message.Type#ERROR})
- */
- @Nonnull
- public Code compile() throws CompilationException {
- Iterable<Message> validationMessages = validate();
- boolean error = false;
- if (validationMessages != null) {
- for (Message message : validationMessages) {
- if (message.getType() == Message.Type.ERROR) {
- error = true;
- break;
- }
- }
- }
- if (error) {
- throw new CompilationException(validationMessages);
- } else {
- T optimized = optimize();
- return optimized.doCompile();
- }
- }
-
- /**
- * This method is invoked only when the primitive is confirmed to be valid.
- * This would generate binary or source code for this primitive and its
- * configuration.
- * @return - Code object generated by the primitive.
- */
- @Nonnull
- protected abstract Code doCompile();
-
- /**
- * This method is invoked only when the primitive is confirmed to be valid.
- * This would operate on the current instance and return back an optimized
- * version of the same type.
- * @return - Optimized object of the same type.
- */
- @Nonnull
- @SuppressWarnings("unchecked")
- public T optimize() {
- T copy = copy();
- return (T) copy.doOptimize();
- }
-
- protected abstract T doOptimize();
-
- public abstract String getNamespace();
-
- public abstract String getEntity();
-
- @Override
- public void store(Storage storage) throws StorageException {
- try {
-
- BufferedWriter bufferedWriterInst =
- new BufferedWriter(new OutputStreamWriter(storage.create(
- getNamespace(), getEntity())));
- String serializedResource =
- getConfiguration().serialize();
- bufferedWriterInst.write(serializedResource);
- bufferedWriterInst.close();
- } catch (IOException e) {
- throw new StorageException(e.getMessage());
- } catch (SerdeException e) {
- throw new StorageException(e.getMessage());
- }
-
- }
-
- @Override
- public void restore(Storage storage) throws StorageException {
- try {
- BufferedReader bufferedReaderInst =
- new BufferedReader(new InputStreamReader(storage.open(
- getNamespace(), getEntity())));
- String configInString = bufferedReaderInst.readLine();
- setConfiguration((V)getConfiguration().deserialize(configInString));
- bufferedReaderInst.close();
- } catch (IOException e) {
- throw new StorageException(e.getMessage());
- } catch (SerdeException e) {
- throw new StorageException(e.getMessage());
- }
-
- }
-
- @Override
- public void delete(Storage storage) throws StorageException {
- storage.delete(getNamespace(), getEntity());
- setConfiguration(null);
- }
-
-}
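
The store/restore pair above round-trips a primitive's configuration through the Storage abstraction. A minimal sketch, assuming the caller supplies a Storage implementation (the helper class name is hypothetical):

    import org.apache.falcon.designer.primitive.Primitive;
    import org.apache.falcon.designer.storage.Storage;
    import org.apache.falcon.designer.storage.StorageException;

    final class PrimitivePersistence {
        private PrimitivePersistence() {}

        /** Persists the configuration and immediately reloads it. */
        static void roundTrip(Primitive<?, ?> primitive, Storage storage) throws StorageException {
            primitive.store(storage);    // serializes the configuration to storage.create(namespace, entity)
            primitive.restore(storage);  // reads it back via storage.open(namespace, entity)
        }
    }
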
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Transform.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Transform.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Transform.java
deleted file mode 100644
index b2d4396..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/primitive/Transform.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-import org.apache.falcon.designer.configuration.TransformConfiguration;
-import org.apache.falcon.designer.schema.RelationalData;
-
-import javax.annotation.Nonnull;
-
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Transform is a foundational primitive in Falcon designer. All well
- * understood data transformations are to be implemented as a Transform.
- *
- * A transform would typically consume one or more data inputs conforming
- * to a schema and would produce one or more outputs, typically with a uniform schema.
- *
- */
-public abstract class Transform extends Primitive<Transform , TransformConfiguration> {
-
- protected List<RelationalData> inputData;
- protected List<RelationalData> outputData;
-
- /**
- * Empty constructor to be used only for deserialization
- * and cloning. Not otherwise.
- */
- protected Transform() {
- }
-
- /**
- * Setter typically used for deserialization and cloning.
- *
- * @param inputData - List of input data
- */
- protected void setInputData(@Nonnull List<RelationalData> inputData) {
- this.inputData = inputData;
- }
-
- /**
- * Setter typically used for deserialization and cloning.
- *
- * @param outputData - List of output data
- */
- protected void setOutputData(@Nonnull List<RelationalData> outputData) {
- this.outputData = outputData;
- }
-
- /**
- * Each Transform by default requires one or more input data sets
- * and produces one or more output data sets.
- *
- * @param inData - List of input data sets for this transform
- * @param outData - List of Output data produced by this transform
- */
- protected Transform(@Nonnull List<RelationalData> inData, @Nonnull List<RelationalData> outData) {
- inputData = Collections.unmodifiableList(inData);
- outputData = Collections.unmodifiableList(outData);
- }
-
- @Nonnull
- public List<RelationalData> getInputData() {
- return inputData;
- }
-
- @Nonnull
- public List<RelationalData> getOutputData() {
- return outputData;
- }
-
-}
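
A hypothetical Transform subclass showing the minimum surface a concrete transform has to fill in; validation and compilation are stubbed and the names are illustrative:

    import java.util.List;
    import org.apache.falcon.designer.configuration.TransformConfiguration;
    import org.apache.falcon.designer.primitive.Code;
    import org.apache.falcon.designer.primitive.Message;
    import org.apache.falcon.designer.primitive.Transform;
    import org.apache.falcon.designer.schema.RelationalData;

    /** Illustrative pass-through transform. */
    public class IdentityTransform extends Transform {
        private TransformConfiguration config;

        public IdentityTransform(List<RelationalData> in, List<RelationalData> out) {
            super(in, out);
        }

        @Override public void setConfiguration(TransformConfiguration config) { this.config = config; }
        @Override public TransformConfiguration getConfiguration() { return config; }
        @Override protected Transform copy() {
            IdentityTransform clone = new IdentityTransform(getInputData(), getOutputData());
            clone.setConfiguration(config);
            return clone;
        }
        @Override public Iterable<Message> validate() { return null; }  // nothing to flag here
        @Override protected Code doCompile() { return new Code() { }; }
        @Override protected Transform doOptimize() { return this; }
        @Override public String getNamespace() { return "designer"; }
        @Override public String getEntity() { return "identity-transform"; }
    }
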
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalData.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalData.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalData.java
deleted file mode 100644
index d930e40..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalData.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.schema;
-
-import org.apache.falcon.designer.source.DataSource;
-import org.apache.falcon.designer.storage.Storage;
-import org.apache.falcon.designer.storage.StorageException;
-import org.apache.falcon.designer.storage.Storeable;
-
-import javax.annotation.Nonnull;
-
-/**
- * Relational Data marker for now.
- */
-public class RelationalData implements Storeable {
-
- //TODO To work out details as we go along.
-
- public RelationalData(RelationalSchema relationalSchema, DataSource dataSource) {
-
- }
-
- @Override
- public void store(@Nonnull Storage storage) throws StorageException {
- //TODO
- }
-
- @Override
- public void restore(@Nonnull Storage storage) throws StorageException {
- //TODO
- }
-
- @Override
- public void delete(@Nonnull Storage storage) throws StorageException {
- //TODO
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/DefaultPartitioner.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/DefaultPartitioner.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/DefaultPartitioner.java
deleted file mode 100644
index ce4bfab..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/DefaultPartitioner.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.util.DRStatusStore;
-import org.apache.falcon.hive.util.EventSourcerUtils;
-import org.apache.falcon.hive.util.HiveDRUtils;
-import org.apache.falcon.hive.util.ReplicationStatus;
-import org.apache.hive.hcatalog.api.repl.Command;
-import org.apache.hive.hcatalog.api.repl.ReplicationTask;
-import org.apache.hive.hcatalog.api.repl.StagingDirectoryProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hive.hcatalog.api.HCatNotificationEvent.Scope;
-
-/**
- * Partitioner for partitioning events for a given DB.
- */
-public class DefaultPartitioner implements Partitioner {
-
- private static final Logger LOG = LoggerFactory.getLogger(DefaultPartitioner.class);
- private EventFilter eventFilter;
- private final DRStatusStore drStore;
- private final EventSourcerUtils eventSourcerUtils;
-
- private enum CMDTYPE {
- SRC_CMD_TYPE,
- TGT_CMD_TYPE
- }
-
- public DefaultPartitioner(DRStatusStore drStore, EventSourcerUtils eventSourcerUtils) {
- this.drStore = drStore;
- this.eventSourcerUtils = eventSourcerUtils;
- }
-
- private class EventFilter {
- private final Map<String, Long> eventFilterMap;
-
- public EventFilter(String sourceMetastoreUri, String targetMetastoreUri, String jobName,
- String database) throws Exception {
- eventFilterMap = new HashMap<>();
- Iterator<ReplicationStatus> replStatusIter = drStore.getTableReplicationStatusesInDb(sourceMetastoreUri,
- targetMetastoreUri, jobName, database);
- while (replStatusIter.hasNext()) {
- ReplicationStatus replStatus = replStatusIter.next();
- eventFilterMap.put(replStatus.getTable(), replStatus.getEventId());
- }
- }
- }
-
- public ReplicationEventMetadata partition(final HiveDROptions drOptions, final String databaseName,
- final Iterator<ReplicationTask> taskIter) throws Exception {
- long lastCounter = 0;
- String dbName = databaseName.toLowerCase();
- // init filtering before partitioning
- this.eventFilter = new EventFilter(drOptions.getSourceMetastoreUri(), drOptions.getTargetMetastoreUri(),
- drOptions.getJobName(), dbName);
- String srcStagingDirProvider = drOptions.getSourceStagingPath();
- String dstStagingDirProvider = drOptions.getTargetStagingPath();
-
- List<Command> dbSrcEventList = Lists.newArrayList();
- List<Command> dbTgtEventList = Lists.newArrayList();
-
- Map<String, List<String>> eventMetaFileMap = new HashMap<>();
- Map<String, List<OutputStream>> outputStreamMap = new HashMap<>();
-
- String srcFilename = null;
- String tgtFilename = null;
- OutputStream srcOutputStream = null;
- OutputStream tgtOutputStream = null;
-
- while (taskIter.hasNext()) {
- ReplicationTask task = taskIter.next();
- if (task.needsStagingDirs()) {
- task.withSrcStagingDirProvider(new StagingDirectoryProvider.TrivialImpl(srcStagingDirProvider,
- HiveDRUtils.SEPARATOR));
- task.withDstStagingDirProvider(new StagingDirectoryProvider.TrivialImpl(dstStagingDirProvider,
- HiveDRUtils.SEPARATOR));
- }
-
- if (task.isActionable()) {
- Scope eventScope = task.getEvent().getEventScope();
- String tableName = task.getEvent().getTableName();
- if (StringUtils.isNotEmpty(tableName)) {
- tableName = tableName.toLowerCase();
- }
-
- boolean firstEventForTable = (eventScope == Scope.TABLE)
- && isFirstEventForTable(eventMetaFileMap, tableName);
- if (firstEventForTable && (task.getSrcWhCommands() != null || task.getDstWhCommands() != null)) {
- ++lastCounter;
- }
- Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> srcCmds = task.getSrcWhCommands();
- if (srcCmds != null) {
- if (eventScope == Scope.DB) {
- processDBScopeCommands(dbSrcEventList, srcCmds, outputStreamMap, CMDTYPE.SRC_CMD_TYPE);
- } else if (eventScope == Scope.TABLE) {
- OutputStream srcOut;
- if (firstEventForTable) {
- srcFilename = eventSourcerUtils.getSrcFileName(String.valueOf(lastCounter)).toString();
- srcOutputStream = eventSourcerUtils.getFileOutputStream(srcFilename);
- srcOut = srcOutputStream;
- } else {
- srcOut = outputStreamMap.get(tableName).get(0);
- }
- processTableScopeCommands(srcCmds, eventMetaFileMap, tableName, dbSrcEventList, srcOut);
- } else {
- throw new Exception("Event scope is not DB or Table");
- }
- }
-
-
- Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> dstCmds = task.getDstWhCommands();
- if (dstCmds != null) {
- if (eventScope == Scope.DB) {
- processDBScopeCommands(dbTgtEventList, dstCmds, outputStreamMap, CMDTYPE.TGT_CMD_TYPE);
- } else if (eventScope == Scope.TABLE) {
- OutputStream tgtOut;
- if (firstEventForTable) {
- tgtFilename = eventSourcerUtils.getTargetFileName(String.valueOf(lastCounter)).toString();
- tgtOutputStream = eventSourcerUtils.getFileOutputStream(tgtFilename);
- tgtOut = tgtOutputStream;
- } else {
- tgtOut = outputStreamMap.get(tableName).get(1);
- }
- processTableScopeCommands(dstCmds, eventMetaFileMap, tableName, dbTgtEventList, tgtOut);
- } else {
- throw new Exception("Event scope is not DB or Table");
- }
- }
-
- // If first table event, update the state data at the end
- if (firstEventForTable) {
- updateStateDataIfFirstTableEvent(tableName, srcFilename, tgtFilename, srcOutputStream,
- tgtOutputStream, eventMetaFileMap, outputStreamMap);
- }
- } else {
- LOG.error("Task is not actionable with event Id : {}", task.getEvent().getEventId());
- }
- }
-
- ReplicationEventMetadata eventMetadata = new ReplicationEventMetadata();
- // If there were only DB events for this run
- if (eventMetaFileMap.isEmpty()) {
- ++lastCounter;
- if (!dbSrcEventList.isEmpty()) {
- srcFilename = eventSourcerUtils.getSrcFileName(String.valueOf(lastCounter)).toString();
- srcOutputStream = eventSourcerUtils.getFileOutputStream(srcFilename);
- eventSourcerUtils.persistReplicationEvents(srcOutputStream, dbSrcEventList);
- }
-
- if (!dbTgtEventList.isEmpty()) {
- tgtFilename = eventSourcerUtils.getTargetFileName(String.valueOf(lastCounter)).toString();
- tgtOutputStream = eventSourcerUtils.getFileOutputStream(tgtFilename);
- eventSourcerUtils.persistReplicationEvents(tgtOutputStream, dbTgtEventList);
- }
-
- // Close the stream
- eventSourcerUtils.closeOutputStream(srcOutputStream);
- eventSourcerUtils.closeOutputStream(tgtOutputStream);
- EventSourcerUtils.updateEventMetadata(eventMetadata, dbName, null, srcFilename, tgtFilename);
- } else {
- closeAllStreams(outputStreamMap);
- for (Map.Entry<String, List<String>> entry : eventMetaFileMap.entrySet()) {
- String srcFile = null;
- String tgtFile = null;
- if (entry.getValue() != null) {
- srcFile = entry.getValue().get(0);
- tgtFile = entry.getValue().get(1);
- }
- EventSourcerUtils.updateEventMetadata(eventMetadata, dbName, entry.getKey(), srcFile, tgtFile);
- }
- }
-
- return eventMetadata;
- }
-
- private void updateStateDataIfFirstTableEvent(final String tableName, final String srcFilename,
- final String tgtFilename,
- final OutputStream srcOutputStream,
- final OutputStream tgtOutputStream,
- Map<String, List<String>> eventMetaFileMap,
- Map<String, List<OutputStream>> outputStreamMap) {
- List<String> files = Arrays.asList(srcFilename, tgtFilename);
- eventMetaFileMap.put(tableName, files);
-
- List<OutputStream> streams = Arrays.asList(srcOutputStream, tgtOutputStream);
- outputStreamMap.put(tableName, streams);
- }
-
- private void closeAllStreams(final Map<String, List<OutputStream>> outputStreamMap) throws Exception {
- if (outputStreamMap == null || outputStreamMap.isEmpty()) {
- return;
- }
-
- for (Map.Entry<String, List<OutputStream>> entry : outputStreamMap.entrySet()) {
- List<OutputStream> streams = entry.getValue();
-
- for (OutputStream out : streams) {
- if (out != null) {
- eventSourcerUtils.closeOutputStream(out);
- }
- }
- }
- }
-
- private void processDBScopeCommands(final List<Command> dbEventList, final Iterable<? extends org.apache.hive
- .hcatalog.api.repl.Command> cmds, final Map<String, List<OutputStream>> outputStreamMap, CMDTYPE cmdType
- ) throws Exception {
- addCmdsToDBEventList(dbEventList, cmds);
-
- /* add DB event to all tables */
- if (!outputStreamMap.isEmpty()) {
- addDbEventToAllTablesEventFile(cmds, outputStreamMap, cmdType);
- }
- }
-
- private void processTableScopeCommands(final Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> cmds,
- final Map<String, List<String>> eventMetaFileMap, String tableName,
- final List<Command> dbEventList, final OutputStream out) throws Exception {
- // First event for this table
- // Before adding this event, add all the DB events
- if (isFirstEventForTable(eventMetaFileMap, tableName)) {
- addDbEventsToTableEventFile(out, dbEventList, tableName);
- }
- addTableEventToFile(out, cmds, tableName);
- }
-
- private boolean isFirstEventForTable(final Map<String, List<String>> eventMetaFileMap,
- final String tableName) {
- List<String> files = eventMetaFileMap.get(tableName);
- return (files == null || files.isEmpty());
- }
-
- private void addCmdsToDBEventList(List<Command> dbEventList,
- final java.lang.Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> cmds) {
- for (Command cmd : cmds) {
- dbEventList.add(cmd);
- }
- }
-
- private void addDbEventToAllTablesEventFile(
- final java.lang.Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> cmds,
- final Map<String, List<OutputStream>> outputStreamMap, final CMDTYPE cmdType) throws Exception {
- for (Map.Entry<String, List<OutputStream>> entry : outputStreamMap.entrySet()) {
- String tableName = entry.getKey();
- List<OutputStream> streams = entry.getValue();
- OutputStream out;
- if (CMDTYPE.SRC_CMD_TYPE == cmdType) {
- out = streams.get(0);
- } else {
- out = streams.get(1);
- }
- addTableEventToFile(out, cmds, tableName);
- }
- }
-
- private void addDbEventsToTableEventFile(final OutputStream out, final List<Command> dbEventList,
- final String tableName) throws Exception {
- /* First event for the table, add db events before adding this event */
- addTableEventToFile(out, dbEventList, tableName);
- }
-
- private void addTableEventToFile(final OutputStream out,
- final java.lang.Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> cmds,
- final String tableName) throws Exception {
- Long eventId = eventFilter.eventFilterMap.get(tableName);
- /* If not already processed, add it */
- for (Command cmd : cmds) {
- persistEvent(out, eventId, cmd);
- }
- }
-
- private void persistEvent(final OutputStream out, final Long eventId, final Command cmd) throws Exception {
- if (out == null) {
- LOG.debug("persistEvent : out is null");
- return;
- }
- if (eventId == null || cmd.getEventId() > eventId) {
- eventSourcerUtils.persistReplicationEvents(out, cmd);
- }
- }
-
- public boolean isPartitioningRequired(final HiveDROptions options) {
- return (HiveDRUtils.getReplicationType(options.getSourceTables()) == HiveDRUtils.ReplicationType.DB);
- }
-}
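
A sketch of how the partitioner above might be driven, assuming the caller already holds the status store, the sourcer utils, the job options and the replication task iterator. The driver class and the database name are illustrative; it sits in the same package as DefaultPartitioner:

    package org.apache.falcon.hive;

    import java.util.Iterator;
    import org.apache.falcon.hive.util.DRStatusStore;
    import org.apache.falcon.hive.util.EventSourcerUtils;
    import org.apache.hive.hcatalog.api.repl.ReplicationTask;

    final class PartitionDriver {
        private PartitionDriver() {}

        static ReplicationEventMetadata partitionDbEvents(DRStatusStore drStore,
                EventSourcerUtils utils, HiveDROptions options,
                Iterator<ReplicationTask> tasks) throws Exception {
            DefaultPartitioner partitioner = new DefaultPartitioner(drStore, utils);
            if (!partitioner.isPartitioningRequired(options)) {
                return null;  // table-level replication needs no per-table partitioning
            }
            return partitioner.partition(options, "sales_db", tasks);
        }
    }
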
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/EventSourcer.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/EventSourcer.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/EventSourcer.java
deleted file mode 100644
index 5f3312c..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/EventSourcer.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-/**
- * Source events for each table into a file.
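- *
- * A typical call sequence (sketch; MetaStoreEventSourcer is one implementation):
- * <pre>
- *   EventSourcer sourcer = new MetaStoreEventSourcer(metastoreClient, partitioner,
- *       eventSourcerUtils, lastEventsIdMap);
- *   String eventsMetaFile = sourcer.sourceEvents(options); // null when there are no events
- * </pre>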
- */
-public interface EventSourcer {
- /**
- * Source events for each <db, table> into a file.
- *
- * @param inputOptions options for the replication job
- * @return path of the events meta file handed to the mapper, or null if there are no events
- */
- String sourceEvents(HiveDROptions inputOptions) throws Exception;
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
deleted file mode 100644
index 5490232..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRArgs.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-
-/**
- * Arguments for workflow execution.
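- * Each constant maps to a command-line option of the same name, e.g.
- * {@code -sourceMetastoreUri thrift://source:9083} (value illustrative); constants
- * created with the two-argument constructor are required options, the others are optional.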
- */
-public enum HiveDRArgs {
-
- // source meta store details
- SOURCE_CLUSTER("sourceCluster", "source cluster"),
- SOURCE_METASTORE_URI("sourceMetastoreUri", "source meta store uri"),
- SOURCE_HS2_URI("sourceHiveServer2Uri", "source HS2 uri"),
- SOURCE_DATABASE("sourceDatabase", "comma-separated list of source databases"),
- SOURCE_TABLE("sourceTable", "comma-separated list of source tables"),
- SOURCE_STAGING_PATH("sourceStagingPath", "source staging path for data"),
-
- // source hadoop endpoints
- SOURCE_NN("sourceNN", "source name node"),
- // source security kerberos principals
- SOURCE_NN_KERBEROS_PRINCIPAL("sourceNNKerberosPrincipal", "Source name node kerberos principal", false),
- SOURCE_HIVE_METASTORE_KERBEROS_PRINCIPAL("sourceHiveMetastoreKerberosPrincipal",
- "Source hive metastore kerberos principal", false),
- SOURCE_HIVE2_KERBEROS_PRINCIPAL("sourceHive2KerberosPrincipal", "Source hiveserver2 kerberos principal", false),
-
- TARGET_CLUSTER("targetCluster", "target cluster"),
- // target meta store details
- TARGET_METASTORE_URI("targetMetastoreUri", "target meta store uri"),
- TARGET_HS2_URI("targetHiveServer2Uri", "target HS2 uri"),
-
- TARGET_STAGING_PATH("targetStagingPath", "target staging path for data"),
-
- // target hadoop endpoints
- TARGET_NN("targetNN", "target name node"),
- // target security kerberos principals
- TARGET_NN_KERBEROS_PRINCIPAL("targetNNKerberosPrincipal", "Target name node kerberos principal", false),
- TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL("targetHiveMetastoreKerberosPrincipal",
- "Target hive metastore kerberos principal", false),
- TARGET_HIVE2_KERBEROS_PRINCIPAL("targetHive2KerberosPrincipal", "Target hiveserver2 kerberos principal", false),
-
- // num events
- MAX_EVENTS("maxEvents", "number of events to process in this run"),
-
- // tuning params
- REPLICATION_MAX_MAPS("replicationMaxMaps", "number of maps", false),
- DISTCP_MAX_MAPS("distcpMaxMaps", "number of maps", false),
-
- // Map Bandwidth
- DISTCP_MAP_BANDWIDTH("distcpMapBandwidth", "map bandwidth in mb", false),
-
- JOB_NAME("drJobName", "unique job name"),
-
- CLUSTER_FOR_JOB_RUN("clusterForJobRun", "cluster where job runs"),
- JOB_CLUSTER_NN("clusterForJobRunWriteEP", "write end point of cluster where job runs"),
- JOB_CLUSTER_NN_KERBEROS_PRINCIPAL("clusterForJobNNKerberosPrincipal",
- "Namenode kerberos principal of cluster on which replication job runs", false),
-
-
- FALCON_LIBPATH("falconLibPath", "Falcon Lib Path for Jar files", false),
-
- KEEP_HISTORY("keepHistory", "Keep history of events file generated", false),
- EXECUTION_STAGE("executionStage", "Flag for workflow stage execution", false),
- COUNTER_LOGDIR("counterLogDir", "Log directory to store counter file", false);
-
- private final String name;
- private final String description;
- private final boolean isRequired;
-
- HiveDRArgs(String name, String description) {
- this(name, description, true);
- }
-
- HiveDRArgs(String name, String description, boolean isRequired) {
- this.name = name;
- this.description = description;
- this.isRequired = isRequired;
- }
-
- public Option getOption() {
- return new Option(this.name, true, this.description);
- }
-
- public String getName() {
- return this.name;
- }
-
- public String getDescription() {
- return description;
- }
-
- public boolean isRequired() {
- return isRequired;
- }
-
- public String getOptionValue(CommandLine cmd) {
- return cmd.getOptionValue(this.name);
- }
-
- @Override
- public String toString() {
- return getName();
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
deleted file mode 100644
index 28515e4..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDROptions.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Tool Options.
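- * Typed accessors over the parsed {@link HiveDRArgs} values; a minimal usage
- * sketch:
- * <pre>
- *   HiveDROptions options = HiveDROptions.create(args);
- *   String sourceUri = options.getSourceMetastoreUri();
- * </pre>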
- */
-public class HiveDROptions {
- private final Map<HiveDRArgs, String> context;
-
- protected HiveDROptions(Map<HiveDRArgs, String> context) {
- this.context = context;
- }
-
- public String getValue(HiveDRArgs arg) {
- return context.get(arg);
- }
-
- public Map<HiveDRArgs, String> getContext() {
- return context;
- }
-
- public String getSourceMetastoreUri() {
- return context.get(HiveDRArgs.SOURCE_METASTORE_URI);
- }
-
- public String getSourceMetastoreKerberosPrincipal() {
- return context.get(HiveDRArgs.SOURCE_HIVE_METASTORE_KERBEROS_PRINCIPAL);
- }
-
- public String getSourceHive2KerberosPrincipal() {
- return context.get(HiveDRArgs.SOURCE_HIVE2_KERBEROS_PRINCIPAL);
- }
-
- public List<String> getSourceDatabases() {
- return Arrays.asList(context.get(HiveDRArgs.SOURCE_DATABASE).trim().split(","));
- }
-
- public List<String> getSourceTables() {
- return Arrays.asList(context.get(HiveDRArgs.SOURCE_TABLE).trim().split(","));
- }
-
- public String getSourceStagingPath() throws HiveReplicationException {
- if (StringUtils.isNotEmpty(context.get(HiveDRArgs.SOURCE_STAGING_PATH))) {
- return context.get(HiveDRArgs.SOURCE_STAGING_PATH) + File.separator + getJobName();
- }
- throw new HiveReplicationException("Source StagingPath cannot be empty");
- }
-
- public String getSourceWriteEP() {
- return context.get(HiveDRArgs.SOURCE_NN);
- }
-
- public String getSourceNNKerberosPrincipal() {
- return context.get(HiveDRArgs.SOURCE_NN_KERBEROS_PRINCIPAL);
- }
-
- public String getTargetWriteEP() {
- return context.get(HiveDRArgs.TARGET_NN);
- }
-
- public String getTargetMetastoreUri() {
- return context.get(HiveDRArgs.TARGET_METASTORE_URI);
- }
-
- public String getTargetNNKerberosPrincipal() {
- return context.get(HiveDRArgs.TARGET_NN_KERBEROS_PRINCIPAL);
- }
-
- public String getTargetMetastoreKerberosPrincipal() {
- return context.get(HiveDRArgs.TARGET_HIVE_METASTORE_KERBEROS_PRINCIPAL);
- }
- public String getTargetHive2KerberosPrincipal() {
- return context.get(HiveDRArgs.TARGET_HIVE2_KERBEROS_PRINCIPAL);
- }
-
- public String getTargetStagingPath() throws HiveReplicationException {
- if (StringUtils.isNotEmpty(context.get(HiveDRArgs.TARGET_STAGING_PATH))) {
- return context.get(HiveDRArgs.TARGET_STAGING_PATH) + File.separator + getJobName();
- }
- throw new HiveReplicationException("Target StagingPath cannot be empty");
- }
-
- public String getReplicationMaxMaps() {
- return context.get(HiveDRArgs.REPLICATION_MAX_MAPS);
- }
-
- public String getJobName() {
- return context.get(HiveDRArgs.JOB_NAME);
- }
-
- public int getMaxEvents() {
- return Integer.parseInt(context.get(HiveDRArgs.MAX_EVENTS));
- }
-
- public boolean shouldKeepHistory() {
- return Boolean.parseBoolean(context.get(HiveDRArgs.KEEP_HISTORY));
- }
-
- public String getJobClusterWriteEP() {
- return context.get(HiveDRArgs.JOB_CLUSTER_NN);
- }
-
- public String getJobClusterNNPrincipal() {
- return context.get(HiveDRArgs.JOB_CLUSTER_NN_KERBEROS_PRINCIPAL);
- }
-
- public void setSourceStagingDir(String path) {
- context.put(HiveDRArgs.SOURCE_STAGING_PATH, path);
- }
-
- public void setTargetStagingDir(String path) {
- context.put(HiveDRArgs.TARGET_STAGING_PATH, path);
- }
-
- public String getExecutionStage() {
- return context.get(HiveDRArgs.EXECUTION_STAGE);
- }
-
- public boolean shouldBlock() {
- return true;
- }
-
- public static HiveDROptions create(String[] args) throws ParseException {
- Map<HiveDRArgs, String> options = new HashMap<HiveDRArgs, String>();
-
- CommandLine cmd = getCommand(args);
- for (HiveDRArgs arg : HiveDRArgs.values()) {
- String optionValue = arg.getOptionValue(cmd);
- if (StringUtils.isNotEmpty(optionValue)) {
- options.put(arg, optionValue);
- }
- }
-
- return new HiveDROptions(options);
- }
-
- private static CommandLine getCommand(String[] arguments) throws ParseException {
- Options options = new Options();
-
- for (HiveDRArgs arg : HiveDRArgs.values()) {
- addOption(options, arg, arg.isRequired());
- }
-
- return new GnuParser().parse(options, arguments, false);
- }
-
- private static void addOption(Options options, HiveDRArgs arg, boolean isRequired) {
- Option option = arg.getOption();
- option.setRequired(isRequired);
- options.addOption(option);
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java
deleted file mode 100644
index e141800..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/HiveDRTool.java
+++ /dev/null
@@ -1,393 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.falcon.hive.mapreduce.CopyMapper;
-import org.apache.falcon.hive.mapreduce.CopyReducer;
-import org.apache.falcon.hive.util.DRStatusStore;
-import org.apache.falcon.hive.util.DelimiterUtils;
-import org.apache.falcon.hive.util.EventSourcerUtils;
-import org.apache.falcon.hive.util.FileUtils;
-import org.apache.falcon.hive.util.HiveDRStatusStore;
-import org.apache.falcon.hive.util.HiveDRUtils;
-import org.apache.falcon.hive.util.HiveMetastoreUtils;
-import org.apache.falcon.job.JobCounters;
-import org.apache.falcon.job.JobCountersHandler;
-import org.apache.falcon.job.JobType;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * DR Tool Driver.
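- * Runs one of three workflow stages: LASTEVENTS, EXPORT or IMPORT. A typical
- * invocation (sketch only; jar name and option values are illustrative):
- * <pre>
- *   hadoop jar hivedr.jar org.apache.falcon.hive.HiveDRTool \
- *       -sourceMetastoreUri thrift://source:9083 \
- *       -targetMetastoreUri thrift://target:9083 \
- *       -drJobName salesDR -executionStage EXPORT
- * </pre>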
- */
-public class HiveDRTool extends Configured implements Tool {
-
- private static final String META_PATH_FILE_SUFFIX = ".metapath";
-
- private FileSystem jobFS;
- private FileSystem sourceClusterFS;
- private FileSystem targetClusterFS;
-
- private HiveDROptions inputOptions;
- private DRStatusStore drStore;
- private String eventsMetaFile;
- private EventSourcerUtils eventSoucerUtil;
- private Configuration jobConf;
- private String executionStage;
-
- public static final FsPermission STAGING_DIR_PERMISSION =
- new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
-
- private static final Logger LOG = LoggerFactory.getLogger(HiveDRTool.class);
-
- public HiveDRTool() {
- }
-
- @Override
- public int run(String[] args) throws Exception {
- if (args.length < 1) {
- usage();
- return -1;
- }
-
- try {
- init(args);
- } catch (Throwable e) {
- LOG.error("Invalid arguments: ", e);
- System.err.println("Invalid arguments: " + e.getMessage());
- usage();
- return -1;
- }
-
- try {
- Job job = execute();
- if ((job != null) && (inputOptions.getExecutionStage().equalsIgnoreCase(
- HiveDRUtils.ExecutionStage.EXPORT.name()))) {
- if ((job.getStatus().getState() == JobStatus.State.SUCCEEDED)
- && (job.getConfiguration().get("counterLogDir") != null)) {
- LOG.info("Obtaining job replication counters for Hive DR job");
- Path counterFile = new Path(job.getConfiguration().get("counterLogDir"), "counter.txt");
- JobCounters hiveReplicationCounters = JobCountersHandler.getCountersType(
- JobType.HIVEREPLICATION.name());
- hiveReplicationCounters.obtainJobCounters(job.getConfiguration(), job, false);
- hiveReplicationCounters.storeJobCounters(job.getConfiguration(), counterFile);
- }
- }
- } catch (Exception e) {
- System.err.println("Exception encountered " + e.getMessage());
- e.printStackTrace();
- LOG.error("Exception encountered, cleaning up staging dirs", e);
- cleanup();
- return -1;
- }
-
- if (inputOptions.getExecutionStage().equalsIgnoreCase(HiveDRUtils.ExecutionStage.IMPORT.name())) {
- cleanup();
- }
-
- return 0;
- }
-
- private void init(String[] args) throws Exception {
- LOG.info("Initializing HiveDR");
- inputOptions = parseOptions(args);
- LOG.info("Input Options: {}", inputOptions);
-
- Configuration sourceConf = FileUtils.getConfiguration(inputOptions.getSourceWriteEP(),
- inputOptions.getSourceNNKerberosPrincipal());
- sourceClusterFS = FileSystem.get(sourceConf);
- Configuration targetConf = FileUtils.getConfiguration(inputOptions.getTargetWriteEP(),
- inputOptions.getTargetNNKerberosPrincipal());
- targetClusterFS = FileSystem.get(targetConf);
- jobConf = FileUtils.getConfiguration(inputOptions.getJobClusterWriteEP(),
- inputOptions.getJobClusterNNPrincipal());
- jobFS = FileSystem.get(jobConf);
-
- // init DR status store
- drStore = new HiveDRStatusStore(targetClusterFS);
- eventSoucerUtil = new EventSourcerUtils(jobConf, inputOptions.shouldKeepHistory(), inputOptions.getJobName());
- }
-
- private HiveDROptions parseOptions(String[] args) throws ParseException {
- return HiveDROptions.create(args);
- }
-
- public Job execute() throws Exception {
- assert inputOptions != null;
- assert getConf() != null;
- executionStage = inputOptions.getExecutionStage();
- LOG.info("Executing Workflow stage : {}", executionStage);
- if (executionStage.equalsIgnoreCase(HiveDRUtils.ExecutionStage.LASTEVENTS.name())) {
- String lastEventsIdFile = getLastEvents(jobConf);
- LOG.info("Last successfully replicated Event file : {}", lastEventsIdFile);
- return null;
- } else if (executionStage.equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name())) {
- createStagingDirectory();
- eventsMetaFile = sourceEvents();
- LOG.info("Sourced Events meta file : {}", eventsMetaFile);
- if (StringUtils.isEmpty(eventsMetaFile)) {
- LOG.info("No events to process");
- return null;
- } else {
- /*
- * eventsMetaFile contains the events to be processed by HiveDr. This file should be available
- * for the import action as well. Persist the file at a location common to both export and import.
- */
- persistEventsMetafileLocation(eventsMetaFile);
- }
- } else if (executionStage.equalsIgnoreCase(HiveDRUtils.ExecutionStage.IMPORT.name())) {
- // read the location of eventsMetaFile from hdfs
- eventsMetaFile = getEventsMetaFileLocation();
- if (StringUtils.isEmpty(eventsMetaFile)) {
- LOG.info("No events to process");
- return null;
- }
- } else {
- throw new HiveReplicationException("Invalid Execution stage : " + inputOptions.getExecutionStage());
- }
-
- Job job = createJob();
- job.submit();
-
- String jobID = job.getJobID().toString();
- job.getConfiguration().set("HIVEDR_JOB_ID", jobID);
-
- LOG.info("HiveDR job-id: {}", jobID);
- if (inputOptions.shouldBlock() && !job.waitForCompletion(true)) {
- throw new IOException("HiveDR failure: Job " + jobID + " has failed: "
- + job.getStatus().getFailureInfo());
- }
-
- return job;
- }
-
- private Job createJob() throws Exception {
- String jobName = "hive-dr-" + executionStage;
- String userChosenName = getConf().get(JobContext.JOB_NAME);
- if (userChosenName != null) {
- jobName += ": " + userChosenName;
- }
- Job job = Job.getInstance(getConf());
- job.setJobName(jobName);
- job.setJarByClass(CopyMapper.class);
- job.setMapperClass(CopyMapper.class);
- job.setReducerClass(CopyReducer.class);
- job.setInputFormatClass(org.apache.hadoop.mapreduce.lib.input.NLineInputFormat.class);
-
- job.setOutputFormatClass(NullOutputFormat.class);
- job.setMapOutputKeyClass(Text.class);
- job.setMapOutputValueClass(Text.class);
-
- job.getConfiguration().set(JobContext.MAP_SPECULATIVE, "false");
- job.getConfiguration().set(JobContext.NUM_MAPS,
- String.valueOf(inputOptions.getReplicationMaxMaps()));
-
- for (HiveDRArgs args : HiveDRArgs.values()) {
- if (inputOptions.getValue(args) != null) {
- job.getConfiguration().set(args.getName(), inputOptions.getValue(args));
- } else {
- job.getConfiguration().set(args.getName(), "null");
- }
- }
- job.getConfiguration().set(FileInputFormat.INPUT_DIR, eventsMetaFile);
-
- return job;
- }
-
- private void createStagingDirectory() throws IOException, HiveReplicationException {
- Path sourceStagingPath = new Path(inputOptions.getSourceStagingPath());
- Path targetStagingPath = new Path(inputOptions.getTargetStagingPath());
- LOG.info("Source staging path: {}", sourceStagingPath);
- if (!FileSystem.mkdirs(sourceClusterFS, sourceStagingPath, STAGING_DIR_PERMISSION)) {
- throw new IOException("mkdir failed for " + sourceStagingPath);
- }
-
- LOG.info("Target staging path: {}", targetStagingPath);
- if (!FileSystem.mkdirs(targetClusterFS, targetStagingPath, STAGING_DIR_PERMISSION)) {
- throw new IOException("mkdir failed for " + targetStagingPath);
- }
- }
-
- private void cleanStagingDirectory() throws HiveReplicationException {
- LOG.info("Cleaning staging directories");
- Path sourceStagingPath = new Path(inputOptions.getSourceStagingPath());
- Path targetStagingPath = new Path(inputOptions.getTargetStagingPath());
- try {
- if (sourceClusterFS.exists(sourceStagingPath)) {
- sourceClusterFS.delete(sourceStagingPath, true);
- }
-
- if (targetClusterFS.exists(targetStagingPath)) {
- targetClusterFS.delete(targetStagingPath, true);
- }
- } catch (IOException e) {
- LOG.error("Unable to cleanup staging dir:", e);
- }
- }
-
- private String sourceEvents() throws Exception {
- MetaStoreEventSourcer defaultSourcer = null;
- String inputFilename = null;
- String lastEventsIdFile = FileUtils.DEFAULT_EVENT_STORE_PATH + File.separator
- + inputOptions.getJobName() + File.separator + inputOptions.getJobName() + ".id";
- Map<String, Long> lastEventsIdMap = getLastDBTableEvents(new Path(lastEventsIdFile));
- try {
- HCatClient sourceMetastoreClient = HiveMetastoreUtils.initializeHiveMetaStoreClient(
- inputOptions.getSourceMetastoreUri(),
- inputOptions.getSourceMetastoreKerberosPrincipal(),
- inputOptions.getSourceHive2KerberosPrincipal());
- defaultSourcer = new MetaStoreEventSourcer(sourceMetastoreClient,
- new DefaultPartitioner(drStore, eventSoucerUtil), eventSoucerUtil, lastEventsIdMap);
- inputFilename = defaultSourcer.sourceEvents(inputOptions);
- } finally {
- if (defaultSourcer != null) {
- defaultSourcer.cleanUp();
- }
- }
-
- return inputFilename;
- }
-
- private String getLastEvents(Configuration conf) throws Exception {
- LastReplicatedEvents lastEvents = new LastReplicatedEvents(conf,
- inputOptions.getTargetMetastoreUri(),
- inputOptions.getTargetMetastoreKerberosPrincipal(),
- inputOptions.getTargetHive2KerberosPrincipal(),
- drStore, inputOptions);
- String eventIdFile = lastEvents.getLastEvents(inputOptions);
- lastEvents.cleanUp();
- return eventIdFile;
- }
-
- private Map<String, Long> getLastDBTableEvents(Path lastEventIdFile) throws Exception {
- Map<String, Long> lastEventsIdMap = new HashMap<String, Long>();
- BufferedReader in = new BufferedReader(new InputStreamReader(jobFS.open(lastEventIdFile)));
- try {
- String line;
- while ((line = in.readLine()) != null) {
- String[] field = line.trim().split(DelimiterUtils.TAB_DELIM, -1);
- lastEventsIdMap.put(field[0], Long.parseLong(field[1]));
- }
- } catch (Exception e) {
- throw new IOException(e);
- } finally {
- IOUtils.closeQuietly(in);
- }
-
- return lastEventsIdMap;
- }
-
- public static void main(String[] args) {
- int exitCode;
- try {
- HiveDRTool hiveDRTool = new HiveDRTool();
- exitCode = ToolRunner.run(HiveDRUtils.getDefaultConf(), hiveDRTool, args);
- } catch (Exception e) {
- LOG.error("Couldn't complete HiveDR operation: ", e);
- exitCode = -1;
- }
-
- System.exit(exitCode);
- }
-
- private void cleanInputDir() {
- eventSoucerUtil.cleanUpEventInputDir();
- }
-
- private synchronized void cleanup() throws HiveReplicationException {
- cleanStagingDirectory();
- cleanInputDir();
- cleanTempFiles();
- }
-
- private void cleanTempFiles() {
- Path eventsDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, inputOptions.getJobName());
- Path metaFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + META_PATH_FILE_SUFFIX);
- Path eventsFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + ".id");
-
- try {
- if (jobFS.exists(metaFilePath)) {
- jobFS.delete(metaFilePath, true);
- }
- if (jobFS.exists(eventsFilePath)) {
- jobFS.delete(eventsFilePath, true);
- }
- } catch (IOException e) {
- LOG.error("Deleting Temp files failed", e);
- }
- }
-
- public void persistEventsMetafileLocation(final String eventMetaFilePath) throws IOException {
- Path eventsDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, inputOptions.getJobName());
- Path metaFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + META_PATH_FILE_SUFFIX);
-
- OutputStream out = null;
- try {
- out = FileSystem.create(jobFS, metaFilePath, FileUtils.FS_PERMISSION_700);
- out.write(eventMetaFilePath.getBytes());
- out.flush();
- } finally {
- IOUtils.closeQuietly(out);
- }
- }
-
- private String getEventsMetaFileLocation() throws IOException {
- Path eventsDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, inputOptions.getJobName());
- Path metaFilePath = new Path(eventsDirPath.toString(), inputOptions.getJobName() + META_PATH_FILE_SUFFIX);
- String line = null;
- if (jobFS.exists(metaFilePath)) {
- BufferedReader in = new BufferedReader(new InputStreamReader(jobFS.open(metaFilePath)));
- line = in.readLine();
- in.close();
- }
- return line;
- }
-
-
- public static void usage() {
- System.out.println("Usage: hivedrtool -option value ....");
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/LastReplicatedEvents.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/LastReplicatedEvents.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/LastReplicatedEvents.java
deleted file mode 100644
index bae6c9e..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/LastReplicatedEvents.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.falcon.hive.util.DRStatusStore;
-import org.apache.falcon.hive.util.DelimiterUtils;
-import org.apache.falcon.hive.util.FileUtils;
-import org.apache.falcon.hive.util.HiveDRUtils;
-import org.apache.falcon.hive.util.HiveMetastoreUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.HCatTable;
-import org.apache.hive.hcatalog.api.repl.ReplicationUtils;
-import org.apache.hive.hcatalog.common.HCatException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Fetches the last replicated event ids from the target Hive metastore and persists them to a file.
- */
-public class LastReplicatedEvents {
- private static final Logger LOG = LoggerFactory.getLogger(LastReplicatedEvents.class);
- private final HCatClient targetMetastoreClient;
- private final DRStatusStore drStore;
- private final FileSystem jobFS;
- private Path eventsInputDirPath;
-
- /* TODO: handle the no-events case; files and lists will be empty */
- public LastReplicatedEvents(Configuration conf, String targetMetastoreUri,
- String targetMetastoreKerberosPrincipal,
- String targetHive2KerberosPrincipal,
- DRStatusStore drStore, HiveDROptions inputOptions) throws Exception {
- targetMetastoreClient = HiveMetastoreUtils.initializeHiveMetaStoreClient(targetMetastoreUri,
- targetMetastoreKerberosPrincipal, targetHive2KerberosPrincipal);
- jobFS = FileSystem.get(conf);
- this.drStore = drStore;
- init(inputOptions.getJobName());
- }
-
- private void init(final String jobName) throws Exception {
- // Create base dir to store events on cluster where job is running
- Path dir = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH);
- // Validate base path
- FileUtils.validatePath(jobFS, new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH));
-
- if (!jobFS.exists(dir)) {
- if (!jobFS.mkdirs(dir)) {
- throw new Exception("Creating directory failed: " + dir);
- }
- }
-
- eventsInputDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, jobName);
-
- if (!jobFS.exists(eventsInputDirPath)) {
- if (!jobFS.mkdirs(eventsInputDirPath)) {
- throw new Exception("Creating directory failed: " + eventsInputDirPath);
- }
- }
- }
-
- public String getLastEvents(HiveDROptions inputOptions) throws Exception {
- HiveDRUtils.ReplicationType replicationType = HiveDRUtils.getReplicationType(inputOptions.getSourceTables());
- LOG.info("Obtaining last events for replicationType : {}", replicationType);
- HashMap<String, Long> lastEvents = new HashMap<String, Long>();
- if (replicationType == HiveDRUtils.ReplicationType.DB) {
- List<String> dbNames = inputOptions.getSourceDatabases();
- for (String db : dbNames) {
- lastEvents.put(db, getLastSavedEventId(inputOptions, db, null));
- }
- } else {
- List<String> tableNames = inputOptions.getSourceTables();
- String db = inputOptions.getSourceDatabases().get(0);
- for (String tableName : tableNames) {
- lastEvents.put(db + "." + tableName, getLastSavedEventId(inputOptions, db, tableName));
- }
- }
-
- return persistLastEventsToFile(lastEvents, inputOptions.getJobName());
- }
-
- private long getLastSavedEventId(HiveDROptions inputOptions, final String dbName,
- final String tableName) throws Exception {
- HiveDRUtils.ReplicationType replicationType = HiveDRUtils.getReplicationType(inputOptions.getSourceTables());
- String jobName = inputOptions.getJobName();
- String sourceMetastoreUri = inputOptions.getSourceMetastoreUri();
- String targetMetastoreUri = inputOptions.getTargetMetastoreUri();
-
- long eventId = 0;
- if (HiveDRUtils.ReplicationType.DB == replicationType) {
- eventId = drStore.getReplicationStatus(sourceMetastoreUri, targetMetastoreUri,
- jobName, dbName).getEventId();
- } else if (HiveDRUtils.ReplicationType.TABLE == replicationType) {
- eventId = drStore.getReplicationStatus(sourceMetastoreUri, targetMetastoreUri,
- jobName, dbName, tableName).getEventId();
- }
-
- if (eventId == -1) {
- if (HiveDRUtils.ReplicationType.DB == replicationType) {
- /*
- * API to get last repl ID for a DB is very expensive, so Hive does not want to make it public.
- * HiveDrTool finds last repl id for DB by finding min last repl id of all tables.
- */
- eventId = getLastReplicationIdForDatabase(dbName);
- } else {
- HCatTable table = targetMetastoreClient.getTable(dbName, tableName);
- eventId = ReplicationUtils.getLastReplicationId(table);
- }
- }
-
- if (StringUtils.isEmpty(tableName)) {
- LOG.info("Last replicated eventId for DB : {} is {}", dbName, eventId);
- } else {
- LOG.info("Last replicated eventId for DB : {} Table : {} is {}", dbName, tableName, eventId);
- }
-
- return eventId;
- }
-
- private long getLastReplicationIdForDatabase(String databaseName) throws HiveReplicationException {
- /*
- * This is a very expensive method and should only be called during first dbReplication instance.
- */
- long eventId = Long.MAX_VALUE;
- try {
- List<String> tableList = targetMetastoreClient.listTableNamesByPattern(databaseName, "*");
- for (String tableName : tableList) {
- long temp = ReplicationUtils.getLastReplicationId(
- targetMetastoreClient.getTable(databaseName, tableName));
- if (temp < eventId) {
- eventId = temp;
- }
- }
- return (eventId == Long.MAX_VALUE) ? 0 : eventId;
- } catch (HCatException e) {
- throw new HiveReplicationException("Unable to find last replication id for database "
- + databaseName, e);
- }
- }
-
- public String persistLastEventsToFile(final HashMap<String, Long> lastEvents,
- final String identifier) throws IOException {
- if (!lastEvents.isEmpty()) {
- Path eventsFile = new Path(eventsInputDirPath.toString(), identifier + ".id");
- OutputStream out = null;
-
- try {
- out = FileSystem.create(jobFS, eventsFile, FileUtils.FS_PERMISSION_700);
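- // One TAB-delimited record per db or db.table key per line, e.g. "salesdb.orders<TAB>42" (values illustrative)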
- for (Map.Entry<String, Long> entry : lastEvents.entrySet()) {
- out.write(entry.getKey().getBytes());
- out.write(DelimiterUtils.TAB_DELIM.getBytes());
- out.write(String.valueOf(entry.getValue()).getBytes());
- out.write(DelimiterUtils.NEWLINE_DELIM.getBytes());
- }
- out.flush();
- } finally {
- IOUtils.closeQuietly(out);
- }
- return jobFS.makeQualified(eventsFile).toString();
- } else {
- return null;
- }
- }
-
- public void cleanUp() throws Exception {
- if (targetMetastoreClient != null) {
- targetMetastoreClient.close();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/MetaStoreEventSourcer.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/MetaStoreEventSourcer.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/MetaStoreEventSourcer.java
deleted file mode 100644
index f008883..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/MetaStoreEventSourcer.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.util.EventSourcerUtils;
-import org.apache.falcon.hive.util.HiveDRUtils;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.repl.ReplicationTask;
-import org.apache.hive.hcatalog.api.repl.StagingDirectoryProvider;
-import org.apache.hive.hcatalog.common.HCatException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.OutputStream;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Sources meta store change events from Hive.
- */
-public class MetaStoreEventSourcer implements EventSourcer {
-
- private static final Logger LOG = LoggerFactory.getLogger(MetaStoreEventSourcer.class);
-
- private final HCatClient sourceMetastoreClient;
- private final Partitioner partitioner;
- private final EventSourcerUtils eventSourcerUtils;
- private final ReplicationEventMetadata eventMetadata;
- private Map<String, Long> lastEventsIdMap = null;
- private long lastCounter;
-
- /* TODO: handle the no-events case; files and lists will be empty */
- public MetaStoreEventSourcer(HCatClient sourceMetastoreClient, Partitioner partitioner,
- EventSourcerUtils eventSourcerUtils,
- Map<String, Long> lastEventsIdMap) throws Exception {
- this.sourceMetastoreClient = sourceMetastoreClient;
- this.eventMetadata = new ReplicationEventMetadata();
- this.partitioner = partitioner;
- this.eventSourcerUtils = eventSourcerUtils;
- this.lastEventsIdMap = lastEventsIdMap;
- }
-
- public String sourceEvents(HiveDROptions inputOptions) throws Exception {
- HiveDRUtils.ReplicationType replicationType = HiveDRUtils.getReplicationType(inputOptions.getSourceTables());
- LOG.info("Sourcing replication events for type : {}", replicationType);
- if (replicationType == HiveDRUtils.ReplicationType.DB) {
- List<String> dbNames = inputOptions.getSourceDatabases();
- for (String db : dbNames) {
- ++lastCounter;
- sourceEventsForDb(inputOptions, db);
- }
- } else {
- List<String> tableNames = inputOptions.getSourceTables();
- String db = inputOptions.getSourceDatabases().get(0);
- for (String tableName : tableNames) {
- ++lastCounter;
- sourceEventsForTable(inputOptions, db, tableName);
- }
- }
-
- if (eventMetadata.getEventFileMetadata() == null || eventMetadata.getEventFileMetadata().isEmpty()) {
- LOG.info("No events for tables for the request db: {} , Tables : {}", inputOptions.getSourceDatabases(),
- inputOptions.getSourceTables());
- eventSourcerUtils.cleanUpEventInputDir();
- return null;
- } else {
- return eventSourcerUtils.persistToMetaFile(eventMetadata, inputOptions.getJobName());
- }
- }
-
- private void sourceEventsForDb(HiveDROptions inputOptions, String dbName) throws Exception {
- Iterator<ReplicationTask> replicationTaskIter = sourceReplicationEvents(getLastSavedEventId(dbName, null),
- inputOptions.getMaxEvents(), dbName, null);
- if (replicationTaskIter == null || !replicationTaskIter.hasNext()) {
- LOG.info("No events for db: {}", dbName);
- return;
- }
- processEvents(dbName, null, inputOptions, replicationTaskIter);
- }
-
- private void sourceEventsForTable(HiveDROptions inputOptions, String dbName, String tableName)
- throws Exception {
- Iterator<ReplicationTask> replicationTaskIter = sourceReplicationEvents(getLastSavedEventId(dbName, tableName),
- inputOptions.getMaxEvents(), dbName, tableName);
- if (replicationTaskIter == null || !replicationTaskIter.hasNext()) {
- LOG.info("No events for db.table: {}.{}", dbName, tableName);
- return;
- }
- processEvents(dbName, tableName, inputOptions, replicationTaskIter);
- }
-
- private void processEvents(String dbName, String tableName, HiveDROptions inputOptions,
- Iterator<ReplicationTask> replicationTaskIter) throws Exception {
- if (partitioner.isPartitioningRequired(inputOptions)) {
- ReplicationEventMetadata dbEventMetadata = partitioner.partition(inputOptions, dbName, replicationTaskIter);
-
- if (dbEventMetadata == null || dbEventMetadata.getEventFileMetadata() == null
- || dbEventMetadata.getEventFileMetadata().isEmpty()) {
- LOG.info("No events for db: {} , Table : {}", dbName, tableName);
- } else {
- EventSourcerUtils.updateEventMetadata(eventMetadata, dbEventMetadata);
- }
- } else {
- processTableReplicationEvents(replicationTaskIter, dbName, tableName,
- inputOptions.getSourceStagingPath(), inputOptions.getTargetStagingPath());
- }
- }
-
- private long getLastSavedEventId(final String dbName, final String tableName) throws Exception {
- String key = dbName;
- if (StringUtils.isNotEmpty(tableName)) {
- key = dbName + "." + tableName;
- }
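- // The LASTEVENTS stage records an id for every replicated db/table, so the key is expected to be present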
- long eventId = lastEventsIdMap.get(key);
- LOG.info("LastSavedEventId eventId for {} : {}", key, eventId);
- return eventId;
- }
-
- private Iterator<ReplicationTask> sourceReplicationEvents(long lastEventId, int maxEvents, String dbName,
- String tableName) throws Exception {
- try {
- return sourceMetastoreClient.getReplicationTasks(lastEventId, maxEvents, dbName, tableName);
- } catch (HCatException e) {
- throw new Exception("Exception getting replication events " + e.getMessage(), e);
- }
- }
-
-
- protected void processTableReplicationEvents(Iterator<ReplicationTask> taskIter, String dbName,
- String tableName, String srcStagingDirProvider,
- String dstStagingDirProvider) throws Exception {
- String srcFilename = null;
- String tgtFilename = null;
- OutputStream srcOutputStream = null;
- OutputStream tgtOutputStream = null;
-
- while (taskIter.hasNext()) {
- ReplicationTask task = taskIter.next();
- if (task.needsStagingDirs()) {
- task.withSrcStagingDirProvider(new StagingDirectoryProvider.TrivialImpl(srcStagingDirProvider,
- HiveDRUtils.SEPARATOR));
- task.withDstStagingDirProvider(new StagingDirectoryProvider.TrivialImpl(dstStagingDirProvider,
- HiveDRUtils.SEPARATOR));
- }
-
- if (task.isActionable()) {
- Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> srcCmds = task.getSrcWhCommands();
- if (srcCmds != null) {
- if (StringUtils.isEmpty(srcFilename)) {
- srcFilename = eventSourcerUtils.getSrcFileName(String.valueOf(lastCounter)).toString();
- srcOutputStream = eventSourcerUtils.getFileOutputStream(srcFilename);
- }
- eventSourcerUtils.persistReplicationEvents(srcOutputStream, srcCmds);
- }
-
-
- Iterable<? extends org.apache.hive.hcatalog.api.repl.Command> dstCmds = task.getDstWhCommands();
- if (dstCmds != null) {
- if (StringUtils.isEmpty(tgtFilename)) {
- tgtFilename = eventSourcerUtils.getTargetFileName(String.valueOf(lastCounter)).toString();
- tgtOutputStream = eventSourcerUtils.getFileOutputStream(tgtFilename);
- }
- eventSourcerUtils.persistReplicationEvents(tgtOutputStream, dstCmds);
- }
-
- } else {
- LOG.error("Task is not actionable with event Id : {}", task.getEvent().getEventId());
- }
- }
- // Close the stream
- eventSourcerUtils.closeOutputStream(srcOutputStream);
- eventSourcerUtils.closeOutputStream(tgtOutputStream);
- EventSourcerUtils.updateEventMetadata(eventMetadata, dbName, tableName, srcFilename, tgtFilename);
- }
-
- public String persistToMetaFile(String jobName) throws Exception {
- return eventSourcerUtils.persistToMetaFile(eventMetadata, jobName);
- }
-
- public void cleanUp() throws Exception {
- if (sourceMetastoreClient != null) {
- sourceMetastoreClient.close();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/Partitioner.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/Partitioner.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/Partitioner.java
deleted file mode 100644
index 25b8bd6..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/Partitioner.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import org.apache.hive.hcatalog.api.repl.ReplicationTask;
-
-import java.util.Iterator;
-
-/**
- * Partition hive events.
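- * Implementations decide whether DB-level events must be split into
- * per-table event files; isPartitioningRequired(options) gates the split.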
- */
-public interface Partitioner {
- /**
- * Partition events.
- *
- * @param options Hive dr options.
- * @param dbName Database name.
- * @param replicationTaskIterator Repl task iterator.
- * @return ReplicationEventMetadata
- */
- ReplicationEventMetadata partition(final HiveDROptions options,
- final String dbName,
- final Iterator<ReplicationTask> replicationTaskIterator) throws Exception;
-
- boolean isPartitioningRequired(final HiveDROptions options);
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/ReplicationEventMetadata.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/ReplicationEventMetadata.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/ReplicationEventMetadata.java
deleted file mode 100644
index 79e0ded..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/ReplicationEventMetadata.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import java.util.Map;
-import java.util.HashMap;
-
-/**
- * Replication event meta data class.
- */
-public class ReplicationEventMetadata {
-
- private Map<String, String> eventFileMetadata = new HashMap<>();
-
- public Map<String, String> getEventFileMetadata() {
- return eventFileMetadata;
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/exception/HiveReplicationException.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/exception/HiveReplicationException.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/exception/HiveReplicationException.java
deleted file mode 100644
index 0baf6d8..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/exception/HiveReplicationException.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.exception;
-
-/**
- * Wrapper class for HiveReplication exceptions.
- */
-public class HiveReplicationException extends Exception {
-
- /**
- * @param e Exception
- */
- public HiveReplicationException(Throwable e) {
- super(e);
- }
-
- public HiveReplicationException(String message, Throwable e) {
- super(message, e);
- }
-
- /**
- * @param message - custom exception message
- */
- public HiveReplicationException(String message) {
- super(message);
- }
-
- private static final long serialVersionUID = -1475818869309247014L;
-
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyCommitter.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyCommitter.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyCommitter.java
deleted file mode 100644
index 98449f0..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyCommitter.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.mapreduce;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Copy committer class.
- */
-public class CopyCommitter extends FileOutputCommitter {
-
- private static final Logger LOG = LoggerFactory.getLogger(CopyCommitter.class);
-
- /**
- * Create a file output committer.
- *
- * @param outputPath the job's output path, or null if you want the output
- * committer to act as a noop.
- * @param context the task's context
- * @throws java.io.IOException
- */
- public CopyCommitter(Path outputPath,
- TaskAttemptContext context) throws IOException {
- super(outputPath, context);
- }
-
- @Override
- public void commitJob(JobContext jobContext) throws IOException {
- Configuration conf = jobContext.getConfiguration();
-
- try {
- super.commitJob(jobContext);
- } finally {
- cleanup(conf);
- }
- }
-
- private void cleanup(Configuration conf) {
- // clean up staging and other data
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyMapper.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyMapper.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyMapper.java
deleted file mode 100644
index 08e0551..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyMapper.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.mapreduce;
-
-import org.apache.falcon.hive.HiveDRArgs;
-import org.apache.falcon.hive.util.EventUtils;
-import org.apache.falcon.hive.util.HiveDRUtils;
-import org.apache.falcon.hive.util.ReplicationStatus;
-import org.apache.falcon.job.ReplicationJobCountersList;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.List;
-
-/**
- * Map class for Hive DR.
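- * Consumes one line of the events meta file per map() call, replays the events
- * via EventUtils and emits (jobName, ReplicationStatus) pairs for the reducer.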
- */
-public class CopyMapper extends Mapper<LongWritable, Text, Text, Text> {
-
- private static final Logger LOG = LoggerFactory.getLogger(CopyMapper.class);
- private EventUtils eventUtils;
-
- @Override
- protected void setup(Context context) throws IOException, InterruptedException {
- eventUtils = new EventUtils(context.getConfiguration());
- eventUtils.initializeFS();
- try {
- eventUtils.setupConnection();
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
-
- @Override
- protected void map(LongWritable key, Text value,
- Context context) throws IOException, InterruptedException {
- LOG.debug("Processing Event value: {}", value.toString());
-
- try {
- eventUtils.processEvents(value.toString());
- } catch (Exception e) {
- LOG.error("Exception in processing events:", e);
- throw new IOException(e);
- } finally {
- cleanup(context);
- }
- List<ReplicationStatus> replicationStatusList = eventUtils.getListReplicationStatus();
- if (replicationStatusList != null && !replicationStatusList.isEmpty()) {
- for (ReplicationStatus rs : replicationStatusList) {
- context.write(new Text(rs.getJobName()), new Text(rs.toString()));
- }
- }
-
- // In case of export stage, populate custom counters
- if (context.getConfiguration().get(HiveDRArgs.EXECUTION_STAGE.getName())
- .equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name())
- && !eventUtils.isCountersMapEmtpy()) {
- context.getCounter(ReplicationJobCountersList.BYTESCOPIED).increment(
- eventUtils.getCounterValue(ReplicationJobCountersList.BYTESCOPIED.getName()));
- context.getCounter(ReplicationJobCountersList.COPY).increment(
- eventUtils.getCounterValue(ReplicationJobCountersList.COPY.getName()));
- }
- }
-
- protected void cleanup(Context context) throws IOException, InterruptedException {
- LOG.info("Invoking cleanup process");
- super.cleanup(context);
- try {
- if (context.getConfiguration().get(HiveDRArgs.EXECUTION_STAGE.getName())
- .equalsIgnoreCase(HiveDRUtils.ExecutionStage.IMPORT.name())) {
- eventUtils.cleanEventsDirectory();
- }
- } catch (IOException e) {
- LOG.error("Cleaning up of events directories failed", e);
- } finally {
- try {
- eventUtils.closeConnection();
- } catch (SQLException e) {
- LOG.error("Closing the connections failed", e);
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java
deleted file mode 100644
index 50cb4b2..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/mapreduce/CopyReducer.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.mapreduce;
-
-
-import org.apache.falcon.hive.HiveDRArgs;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.falcon.hive.util.DRStatusStore;
-import org.apache.falcon.hive.util.FileUtils;
-import org.apache.falcon.hive.util.HiveDRStatusStore;
-import org.apache.falcon.hive.util.ReplicationStatus;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Reducer;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
-/**
- * Reducer class for Hive DR.
- */
-public class CopyReducer extends Reducer<Text, Text, Text, Text> {
- private DRStatusStore hiveDRStore;
-
- @Override
- protected void setup(Context context) throws IOException, InterruptedException {
- Configuration conf = context.getConfiguration();
- FileSystem fs = FileSystem.get(FileUtils.getConfiguration(
- conf.get(HiveDRArgs.TARGET_NN.getName()),
- conf.get(HiveDRArgs.TARGET_NN_KERBEROS_PRINCIPAL.getName())));
- hiveDRStore = new HiveDRStatusStore(fs);
- }
-
- private List<ReplicationStatus> sortStatusList(List<ReplicationStatus> replStatusList) {
- Collections.sort(replStatusList, new Comparator<ReplicationStatus>() {
- @Override
- public int compare(ReplicationStatus r1, ReplicationStatus r2) {
- // Long.compare avoids int overflow when event ids are far apart.
- return Long.compare(r1.getEventId(), r2.getEventId());
- }
- });
- return replStatusList;
- }
-
- @Override
- protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
- List<ReplicationStatus> replStatusList = new ArrayList<ReplicationStatus>();
- ReplicationStatus rs;
- try {
- for (Text value : values) {
- String[] fields = (value.toString()).split("\t");
- rs = new ReplicationStatus(fields[0], fields[1], fields[2], fields[3], fields[4],
- ReplicationStatus.Status.valueOf(fields[5]), Long.parseLong(fields[6]));
- replStatusList.add(rs);
- }
-
- hiveDRStore.updateReplicationStatus(key.toString(), sortStatusList(replStatusList));
- } catch (HiveReplicationException e) {
- throw new IOException(e);
- }
- }
-
- @Override
- protected void cleanup(Context context) throws IOException, InterruptedException {
- }
-}
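A hedged sketch of the status-store update the reducer performs, written as a standalone caller. The filesystem URI, job name, and status values are illustrative; passing a null principal assumes an insecure cluster.

    import org.apache.falcon.hive.util.DRStatusStore;
    import org.apache.falcon.hive.util.FileUtils;
    import org.apache.falcon.hive.util.HiveDRStatusStore;
    import org.apache.falcon.hive.util.ReplicationStatus;
    import org.apache.hadoop.fs.FileSystem;

    import java.util.Collections;

    public final class StatusStoreSketch {
        public static void main(String[] args) throws Exception {
            // Principal may be null/empty on insecure clusters (see FileUtils below).
            FileSystem fs = FileSystem.get(
                    FileUtils.getConfiguration("hdfs://target:8020", null));
            DRStatusStore store = new HiveDRStatusStore(fs);
            ReplicationStatus rs = new ReplicationStatus(
                    "hdfs://source:8020", "hdfs://target:8020", "drJob",
                    "salesdb", "orders", ReplicationStatus.Status.SUCCESS, 42L);
            store.updateReplicationStatus("drJob", Collections.singletonList(rs));
        }
    }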
[3/7] falcon git commit: Removing addons/ non-docs directory from asf-site branch
Posted by pa...@apache.org.
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DBReplicationStatus.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DBReplicationStatus.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DBReplicationStatus.java
deleted file mode 100644
index 6dceb8e..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DBReplicationStatus.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Class to store replication status of a DB and its tables.
- */
-public class DBReplicationStatus {
-
- private static final Logger LOG = LoggerFactory.getLogger(DBReplicationStatus.class);
- private static final String DB_STATUS = "db_status";
- private static final String TABLE_STATUS = "table_status";
-
- private Map<String, ReplicationStatus> tableStatuses = new HashMap<String, ReplicationStatus>();
- private ReplicationStatus databaseStatus;
-
- public DBReplicationStatus(ReplicationStatus dbStatus) throws HiveReplicationException {
- setDatabaseStatus(dbStatus);
- }
-
- public DBReplicationStatus(ReplicationStatus dbStatus,
- Map<String, ReplicationStatus> tableStatuses) throws HiveReplicationException {
- /*
- The order of the set method calls is important to ensure tables that do not belong to the
- same db are not added to this DBReplicationStatus.
- */
- setDatabaseStatus(dbStatus);
- setTableStatuses(tableStatuses);
- }
-
- // Serialize
- public String toJsonString() throws HiveReplicationException {
- JSONObject retObject = new JSONObject();
- JSONObject tableStatus = new JSONObject();
- try {
- for (Map.Entry<String, ReplicationStatus> status : tableStatuses.entrySet()) {
- tableStatus.put(status.getKey(), status.getValue().toJsonObject());
- }
- retObject.put(DB_STATUS, databaseStatus.toJsonObject());
- retObject.put(TABLE_STATUS, tableStatus);
- return retObject.toString(ReplicationStatus.INDENT_FACTOR);
- } catch (JSONException e) {
- throw new HiveReplicationException("Unable to serialize Database Replication Status", e);
- }
- }
-
- // de-serialize
- public DBReplicationStatus(String jsonString) throws HiveReplicationException {
- try {
- JSONObject object = new JSONObject(jsonString);
- ReplicationStatus dbstatus = new ReplicationStatus(object.get(DB_STATUS).toString());
- setDatabaseStatus(dbstatus);
-
- JSONObject tableJson = object.getJSONObject(TABLE_STATUS);
- Iterator keys = tableJson.keys();
- while (keys.hasNext()) {
- String key = keys.next().toString();
- ReplicationStatus value = new ReplicationStatus(tableJson.get(key).toString());
- if (value.getDatabase().equals(dbstatus.getDatabase())) {
- tableStatuses.put(key.toLowerCase(), value);
- } else {
- throw new HiveReplicationException("Unable to create DBReplicationStatus from JsonString. "
- + "Cannot set status for table " + value.getDatabase() + "." + value.getTable()
- + ", it does not belong to DB " + dbstatus.getDatabase());
- }
- }
- } catch (JSONException e) {
- throw new HiveReplicationException("Unable to create DBReplicationStatus from JsonString", e);
- }
- }
-
- public Map<String, ReplicationStatus> getTableStatuses() {
- return tableStatuses;
- }
-
- public ReplicationStatus getTableStatus(String tableName) throws HiveReplicationException {
- tableName = tableName.toLowerCase();
- if (tableStatuses.containsKey(tableName)) {
- return tableStatuses.get(tableName);
- }
- return new ReplicationStatus(databaseStatus.getSourceUri(), databaseStatus.getTargetUri(),
- databaseStatus.getJobName(), databaseStatus.getDatabase(),
- tableName, ReplicationStatus.Status.INIT, -1);
- }
-
- public Iterator<ReplicationStatus> getTableStatusIterator() {
- List<ReplicationStatus> resultSet = new ArrayList<ReplicationStatus>();
- for (Map.Entry<String, ReplicationStatus> entry : tableStatuses.entrySet()) {
- resultSet.add(entry.getValue());
- }
- return resultSet.iterator();
- }
-
- private void setTableStatuses(Map<String, ReplicationStatus> tableStatuses) throws HiveReplicationException {
- for (Map.Entry<String, ReplicationStatus> entry : tableStatuses.entrySet()) {
- if (!entry.getValue().getDatabase().equals(databaseStatus.getDatabase())) {
- throw new HiveReplicationException("Cannot set status for table " + entry.getValue().getDatabase()
- + "." + entry.getValue().getTable() + ", it does not belong to DB "
- + databaseStatus.getDatabase());
- } else {
- this.tableStatuses.put(entry.getKey().toLowerCase(), entry.getValue());
- }
- }
- }
-
- public ReplicationStatus getDatabaseStatus() {
- return databaseStatus;
- }
-
- private void setDatabaseStatus(ReplicationStatus databaseStatus) {
- this.databaseStatus = databaseStatus;
- }
-
- /**
- * Update DB status from table statuses.
- Case 1) All tables replicated successfully:
- take the largest successful eventId and set dbReplStatus as success.
- Case 2) One or more tables failed to replicate:
- take the smallest eventId amongst the failed tables and set dbReplStatus as failed.
- */
- public void updateDbStatusFromTableStatuses() throws HiveReplicationException {
- if (tableStatuses.size() == 0) {
- // nothing to do
- return;
- }
-
- databaseStatus.setStatus(ReplicationStatus.Status.SUCCESS);
- long successEventId = databaseStatus.getEventId();
- long failedEventId = -1;
-
- for (Map.Entry<String, ReplicationStatus> entry : tableStatuses.entrySet()) {
- long eventId = entry.getValue().getEventId();
- if (entry.getValue().getStatus().equals(ReplicationStatus.Status.SUCCESS)) {
- if (eventId > successEventId) {
- successEventId = eventId;
- }
- } else if (entry.getValue().getStatus().equals(ReplicationStatus.Status.FAILURE)) {
- databaseStatus.setStatus(ReplicationStatus.Status.FAILURE);
- if (eventId < failedEventId || failedEventId == -1) {
- failedEventId = eventId;
- }
- } // else, if table status is Status.INIT, it should not change the DB's lastEventId
- }
-
- String log = "Updating DB Status based on table replication status. Status : "
- + databaseStatus.getStatus().toString() + ", eventId : ";
- if (databaseStatus.getStatus().equals(ReplicationStatus.Status.SUCCESS)) {
- databaseStatus.setEventId(successEventId);
- LOG.info(log + String.valueOf(successEventId));
- } else if (databaseStatus.getStatus().equals(ReplicationStatus.Status.FAILURE)) {
- databaseStatus.setEventId(failedEventId);
- LOG.error(log + String.valueOf(failedEventId));
- }
-
- }
-
- public void updateDbStatus(ReplicationStatus status) throws HiveReplicationException {
- if (StringUtils.isNotEmpty(status.getTable())) {
- throw new HiveReplicationException("Cannot update DB Status. This is table level status.");
- }
-
- if (this.databaseStatus.getDatabase().equals(status.getDatabase())) {
- this.databaseStatus = status;
- } else {
- throw new HiveReplicationException("Cannot update Database Status. StatusDB "
- + status.getDatabase() + " does not match current DB "
- + this.databaseStatus.getDatabase());
- }
- }
-
- public void updateTableStatus(ReplicationStatus status) throws HiveReplicationException {
- if (StringUtils.isEmpty(status.getTable())) {
- throw new HiveReplicationException("Cannot update Table Status. Table name is empty.");
- }
-
- if (this.databaseStatus.getDatabase().equals(status.getDatabase())) {
- this.tableStatuses.put(status.getTable(), status);
- } else {
- throw new HiveReplicationException("Cannot update Table Status. TableDB "
- + status.getDatabase() + " does not match current DB "
- + this.databaseStatus.getDatabase());
- }
- }
-}
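A short round-trip sketch for the serialize/de-serialize constructors above. Identifiers are illustrative, and INIT with eventId -1 mirrors how HiveDRStatusStore (below) seeds a new database entry.

    import org.apache.falcon.hive.util.DBReplicationStatus;
    import org.apache.falcon.hive.util.ReplicationStatus;

    public final class DbStatusRoundTrip {
        public static void main(String[] args) throws Exception {
            // A DB-level status carries no table name.
            ReplicationStatus dbStatus = new ReplicationStatus(
                    "hdfs://source:8020", "hdfs://target:8020", "drJob",
                    "salesdb", null, ReplicationStatus.Status.INIT, -1);
            DBReplicationStatus status = new DBReplicationStatus(dbStatus);
            String json = status.toJsonString();                      // serialize
            DBReplicationStatus copy = new DBReplicationStatus(json); // de-serialize
            System.out.println(copy.getDatabaseStatus().getDatabase());
        }
    }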
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DRStatusStore.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DRStatusStore.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DRStatusStore.java
deleted file mode 100644
index cf6b7ad..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DRStatusStore.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import java.util.Iterator;
-import java.util.List;
-
-/**
- * Abstract class for Data Replication Status Store.
- */
-public abstract class DRStatusStore {
-
- public static final String BASE_DEFAULT_STORE_PATH = "/apps/data-mirroring/";
- public static final FsPermission DEFAULT_STORE_PERMISSION =
- new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
-
- private static String storeGroup = "users";
-
-
- /**
- * Update replication status of table(s)/db after replication job jobName completes.
- * @param jobName Name of the replication job.
- * @param statusList List of replication statuses of db/tables replicated by jobName.
- */
- public abstract void updateReplicationStatus(String jobName, List<ReplicationStatus> statusList)
- throws HiveReplicationException;
-
- /**
- * Get Replication status for a database.
- * @param source Replication source uri.
- * @param target Replication target uri.
- * @param jobName Name of the replication job.
- * @param database Name of the target database.
- * @return ReplicationStatus for the database.
- */
- public abstract ReplicationStatus getReplicationStatus(String source, String target,
- String jobName, String database)
- throws HiveReplicationException;
-
- /**
- * Get Replication status for a table.
- * @param source Replication source uri.
- * @param target Replication target uri.
- * @param jobName Name of the replication job.
- * @param database Name of the target database.
- * @param table Name of the target table.
- * @return ReplicationStatus for the table.
- */
- public abstract ReplicationStatus getReplicationStatus(String source, String target,
- String jobName, String database,
- String table) throws HiveReplicationException;
-
- /**
- * Get Replication status of all tables in a database.
- * @param source Replication source uri.
- * @param target Replication target uri.
- * @param jobName Name of the replication job.
- * @param database Name of the target database.
- * @return Iterator over the replication statuses of all tables in the database.
- */
- public abstract Iterator<ReplicationStatus> getTableReplicationStatusesInDb(String source, String target,
- String jobName, String database)
- throws HiveReplicationException;
-
-
- /**
- * Delete a replication job.
- * @param jobName Name of the replication job.
- * @param database Name of the target database.
- */
- public abstract void deleteReplicationStatus(String jobName, String database) throws HiveReplicationException;
-
- public static String getStoreGroup() {
- return storeGroup;
- }
-
- public static void setStoreGroup(String group) {
- storeGroup = group;
- }
-}
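How a monitoring caller might walk per-table statuses through this abstract API; a sketch that assumes a concrete store instance such as HiveDRStatusStore, with illustrative URIs and names.

    import org.apache.falcon.hive.exception.HiveReplicationException;
    import org.apache.falcon.hive.util.DRStatusStore;
    import org.apache.falcon.hive.util.ReplicationStatus;

    import java.util.Iterator;

    final class StoreQuerySketch {
        // 'store' would be a concrete subclass such as HiveDRStatusStore.
        static void printTableStatuses(DRStatusStore store) throws HiveReplicationException {
            Iterator<ReplicationStatus> tables = store.getTableReplicationStatusesInDb(
                    "hdfs://source:8020", "hdfs://target:8020", "drJob", "salesdb");
            while (tables.hasNext()) {
                ReplicationStatus ts = tables.next();
                System.out.println(ts.getTable() + " -> " + ts.getStatus());
            }
        }
    }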
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DelimiterUtils.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DelimiterUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DelimiterUtils.java
deleted file mode 100644
index 3b3156f..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/DelimiterUtils.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-/**
- * Public delimiters used for event processing.
- */
-public final class DelimiterUtils {
- public static final String FIELD_DELIM = "\u0001";
- public static final String NEWLINE_DELIM = System.getProperty("line.separator");
- public static final String TAB_DELIM = "\t";
-
- private DelimiterUtils() {}
-}
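A minimal sketch of how these delimiters compose and split a metadata record, as EventSourcerUtils does below; the key values are illustrative.

    import org.apache.falcon.hive.util.DelimiterUtils;

    public final class DelimiterSketch {
        public static void main(String[] args) {
            // \u0001 separates fields within a record; newline separates records.
            String record = "dbKey" + DelimiterUtils.FIELD_DELIM + "tableKey";
            String[] fields = record.split(DelimiterUtils.FIELD_DELIM);
            System.out.println(fields.length);  // 2
        }
    }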
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventSourcerUtils.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventSourcerUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventSourcerUtils.java
deleted file mode 100644
index fb695d0..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventSourcerUtils.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.ReplicationEventMetadata;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hive.hcatalog.api.repl.Command;
-import org.apache.hive.hcatalog.api.repl.ReplicationUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Map;
-
-/**
- * Utility methods for event sourcer.
- */
-public class EventSourcerUtils {
-
- private static final String METAFILE_EXTENSION = ".meta";
- private static final String SRCFILE_EXTENSION = ".src";
- private static final String TGTFILE_EXTENSION = ".tgt";
- private Path eventsInputDirPath;
- private final boolean shouldKeepHistory;
- private final FileSystem jobFS;
-
- private static final Logger LOG = LoggerFactory.getLogger(EventSourcerUtils.class);
-
- public EventSourcerUtils(final Configuration conf, final boolean shouldKeepHistory,
- final String jobName) throws Exception {
- this.shouldKeepHistory = shouldKeepHistory;
- jobFS = FileSystem.get(conf);
- init(jobName);
- }
-
- private void init(final String jobName) throws Exception {
- // Create base dir to store events on cluster where job is running
- Path dir = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH);
- // Validate base path
- FileUtils.validatePath(jobFS, new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH));
-
- if (!jobFS.exists(dir)) {
- if (!jobFS.mkdirs(dir)) {
- throw new Exception("Creating directory failed: " + dir);
- }
- }
-
- eventsInputDirPath = new Path(FileUtils.DEFAULT_EVENT_STORE_PATH, jobName);
-
- if (!jobFS.exists(eventsInputDirPath)) {
- if (!jobFS.mkdirs(eventsInputDirPath)) {
- throw new Exception("Creating directory failed: " + eventsInputDirPath);
- }
- }
- }
-
- public OutputStream getFileOutputStream(final String path) throws Exception {
- return FileSystem.create(jobFS, new Path(path), FileUtils.FS_PERMISSION_700);
- }
-
- public void closeOutputStream(OutputStream out) throws IOException {
- if (out != null) {
- try {
- out.flush();
- } finally {
- IOUtils.closeQuietly(out);
- }
- }
- }
-
- public void persistReplicationEvents(final OutputStream out,
- final Iterable<? extends Command> cmds)
- throws Exception {
- for (Command cmd : cmds) {
- persistReplicationEvents(out, cmd);
- }
- }
-
- public void persistReplicationEvents(final OutputStream out,
- final Command cmd) throws Exception {
- out.write(ReplicationUtils.serializeCommand(cmd).getBytes());
- LOG.debug("HiveDR Serialized Repl Command : {}", cmd);
- out.write(DelimiterUtils.NEWLINE_DELIM.getBytes());
- }
-
- public String persistToMetaFile(final ReplicationEventMetadata data, final String identifier) throws IOException {
- if (data != null && data.getEventFileMetadata() != null && !data.getEventFileMetadata().isEmpty()) {
- Path metaFilename = new Path(eventsInputDirPath.toString(), identifier + METAFILE_EXTENSION);
- OutputStream out = null;
-
- try {
- out = FileSystem.create(jobFS, metaFilename, FileUtils.FS_PERMISSION_700);
-
- for (Map.Entry<String, String> entry : data.getEventFileMetadata().entrySet()) {
- out.write(entry.getKey().getBytes());
- out.write(DelimiterUtils.FIELD_DELIM.getBytes());
- out.write(entry.getValue().getBytes());
- out.write(DelimiterUtils.NEWLINE_DELIM.getBytes());
- }
- out.flush();
- } finally {
- IOUtils.closeQuietly(out);
- }
- return jobFS.makeQualified(metaFilename).toString();
- } else {
- return null;
- }
- }
-
- public static void updateEventMetadata(ReplicationEventMetadata data, final String dbName, final String tableName,
- final String srcFilename, final String tgtFilename) {
- if (data == null || data.getEventFileMetadata() == null) {
- return;
- }
- StringBuilder key = new StringBuilder();
-
- if (StringUtils.isNotEmpty(dbName)) {
- key.append(Base64.encodeBase64URLSafeString(dbName.toLowerCase().getBytes()));
- }
- key.append(DelimiterUtils.FIELD_DELIM);
- if (StringUtils.isNotEmpty(tableName)) {
- key.append(Base64.encodeBase64URLSafeString(tableName.toLowerCase().getBytes()));
- }
-
- StringBuilder value = new StringBuilder();
- if (StringUtils.isNotEmpty(srcFilename)) {
- value.append(srcFilename);
- }
- value.append(DelimiterUtils.FIELD_DELIM);
-
- if (StringUtils.isNotEmpty(tgtFilename)) {
- value.append(tgtFilename);
- }
-
- data.getEventFileMetadata().put(key.toString(), value.toString());
- }
-
- public static void updateEventMetadata(ReplicationEventMetadata data, final ReplicationEventMetadata inputData) {
- if (data == null || data.getEventFileMetadata() == null || inputData == null
- || inputData.getEventFileMetadata() == null || inputData.getEventFileMetadata().isEmpty()) {
- return;
- }
-
- data.getEventFileMetadata().putAll(inputData.getEventFileMetadata());
- }
-
- public Path getSrcFileName(final String identifier) {
- return jobFS.makeQualified(new Path(eventsInputDirPath, identifier + SRCFILE_EXTENSION));
- }
-
- public Path getTargetFileName(final String identifier) {
- return jobFS.makeQualified(new Path(eventsInputDirPath, identifier + TGTFILE_EXTENSION));
- }
-
- public void cleanUpEventInputDir() {
- if (!shouldKeepHistory) {
- try {
- jobFS.delete(eventsInputDirPath, true);
- eventsInputDirPath = null;
- } catch (IOException e) {
- LOG.error("Unable to cleanup: {}", eventsInputDirPath, e);
- }
- }
- }
-}
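A hedged sketch of the persistence flow above. It assumes the base event-store directories already exist with the expected permissions; the empty command list stands in for commands that the real sourcer obtains from HCatalog's replication task API.

    import org.apache.falcon.hive.util.EventSourcerUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hive.hcatalog.api.repl.Command;

    import java.io.OutputStream;
    import java.util.Collections;

    public final class EventPersistSketch {
        public static void main(String[] args) throws Exception {
            EventSourcerUtils utils = new EventSourcerUtils(new Configuration(), false, "drJob");
            // An empty list keeps the sketch self-contained; real commands come
            // from HCatalog's replication tasks.
            Iterable<? extends Command> cmds = Collections.<Command>emptyList();
            OutputStream out = utils.getFileOutputStream(
                    utils.getSrcFileName("event-0001").toString());
            try {
                utils.persistReplicationEvents(out, cmds);
            } finally {
                utils.closeOutputStream(out);
            }
        }
    }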
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java
deleted file mode 100644
index d075bfb..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/EventUtils.java
+++ /dev/null
@@ -1,393 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.HiveDRArgs;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobStatus;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.tools.DistCp;
-import org.apache.hadoop.tools.DistCpOptions;
-import org.apache.hive.hcatalog.api.repl.Command;
-import org.apache.hive.hcatalog.api.repl.ReplicationUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * Utility class to handle Hive events for data-mirroring.
- */
-public class EventUtils {
- private static final String DRIVER_NAME = "org.apache.hive.jdbc.HiveDriver";
- private static final int TIMEOUT_IN_SECS = 300;
- private static final String JDBC_PREFIX = "jdbc:";
- private static final int RETRY_ATTEMPTS = 3;
-
- private Configuration conf = null;
- private String sourceHiveServer2Uri = null;
- private String sourceDatabase = null;
- private String sourceNN = null;
- private String sourceNNKerberosPrincipal = null;
- private String jobNN = null;
- private String jobNNKerberosPrincipal = null;
- private String targetHiveServer2Uri = null;
- private String targetStagingPath = null;
- private String targetNN = null;
- private String targetNNKerberosPrincipal = null;
- private String fullyQualifiedTargetStagingPath = null;
- private List<Path> sourceCleanUpList = null;
- private List<Path> targetCleanUpList = null;
- private static final Logger LOG = LoggerFactory.getLogger(EventUtils.class);
-
- private FileSystem sourceFileSystem = null;
- private FileSystem jobFileSystem = null;
- private FileSystem targetFileSystem = null;
- private Connection sourceConnection = null;
- private Connection targetConnection = null;
- private Statement sourceStatement = null;
- private Statement targetStatement = null;
-
- private Map<String, Long> countersMap = null;
-
- private List<ReplicationStatus> listReplicationStatus;
-
- public EventUtils(Configuration conf) {
- this.conf = conf;
- sourceHiveServer2Uri = conf.get(HiveDRArgs.SOURCE_HS2_URI.getName());
- sourceDatabase = conf.get(HiveDRArgs.SOURCE_DATABASE.getName());
- sourceNN = conf.get(HiveDRArgs.SOURCE_NN.getName());
- sourceNNKerberosPrincipal = conf.get(HiveDRArgs.SOURCE_NN_KERBEROS_PRINCIPAL.getName());
- jobNN = conf.get(HiveDRArgs.JOB_CLUSTER_NN.getName());
- jobNNKerberosPrincipal = conf.get(HiveDRArgs.JOB_CLUSTER_NN_KERBEROS_PRINCIPAL.getName());
- targetHiveServer2Uri = conf.get(HiveDRArgs.TARGET_HS2_URI.getName());
- targetStagingPath = conf.get(HiveDRArgs.TARGET_STAGING_PATH.getName())
- + File.separator + conf.get(HiveDRArgs.JOB_NAME.getName());
- targetNN = conf.get(HiveDRArgs.TARGET_NN.getName());
- targetNNKerberosPrincipal = conf.get(HiveDRArgs.TARGET_NN_KERBEROS_PRINCIPAL.getName());
- sourceCleanUpList = new ArrayList<Path>();
- targetCleanUpList = new ArrayList<Path>();
- countersMap = new HashMap<>();
- }
-
- public void setupConnection() throws Exception {
- Class.forName(DRIVER_NAME);
- DriverManager.setLoginTimeout(TIMEOUT_IN_SECS);
- String authTokenString = ";auth=delegationToken";
- //To bypass findbugs check, need to store empty password in Properties.
- Properties password = new Properties();
- password.put("password", "");
- String user = "";
-
- UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
- if (currentUser != null) {
- user = currentUser.getShortUserName();
- }
-
- if (conf.get(HiveDRArgs.EXECUTION_STAGE.getName())
- .equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name())) {
- String connString = JDBC_PREFIX + sourceHiveServer2Uri + "/" + sourceDatabase;
- if (StringUtils.isNotEmpty(conf.get(HiveDRArgs.SOURCE_HIVE2_KERBEROS_PRINCIPAL.getName()))) {
- connString += authTokenString;
- }
- sourceConnection = DriverManager.getConnection(connString, user, password.getProperty("password"));
- sourceStatement = sourceConnection.createStatement();
- } else {
- String connString = JDBC_PREFIX + targetHiveServer2Uri + "/" + sourceDatabase;
- if (StringUtils.isNotEmpty(conf.get(HiveDRArgs.TARGET_HIVE2_KERBEROS_PRINCIPAL.getName()))) {
- connString += authTokenString;
- }
- targetConnection = DriverManager.getConnection(connString, user, password.getProperty("password"));
- targetStatement = targetConnection.createStatement();
- }
- }
-
- public void initializeFS() throws IOException {
- LOG.info("Initializing staging directory");
- fullyQualifiedTargetStagingPath = new Path(targetNN, targetStagingPath).toString();
- sourceFileSystem = FileSystem.get(FileUtils.getConfiguration(sourceNN, sourceNNKerberosPrincipal));
- jobFileSystem = FileSystem.get(FileUtils.getConfiguration(jobNN, jobNNKerberosPrincipal));
- targetFileSystem = FileSystem.get(FileUtils.getConfiguration(targetNN, targetNNKerberosPrincipal));
- }
-
- private String readEvents(Path eventFileName) throws IOException {
- StringBuilder eventString = new StringBuilder();
- BufferedReader in = new BufferedReader(new InputStreamReader(jobFileSystem.open(eventFileName)));
- try {
- String line;
- while ((line = in.readLine()) != null) {
- eventString.append(line);
- eventString.append(DelimiterUtils.NEWLINE_DELIM);
- }
- } catch (Exception e) {
- throw new IOException(e);
- } finally {
- IOUtils.closeQuietly(in);
- }
-
- return eventString.toString();
- }
-
- public void processEvents(String event) throws Exception {
- listReplicationStatus = new ArrayList<ReplicationStatus>();
- String[] eventSplit = event.split(DelimiterUtils.FIELD_DELIM);
- String dbName = new String(Base64.decodeBase64(eventSplit[0]), "UTF-8");
- String tableName = new String(Base64.decodeBase64(eventSplit[1]), "UTF-8");
- String exportEventStr;
- String importEventStr;
- if (conf.get(HiveDRArgs.EXECUTION_STAGE.getName())
- .equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name())) {
- exportEventStr = readEvents(new Path(eventSplit[2]));
- if (StringUtils.isNotEmpty(exportEventStr)) {
- LOG.info("Process the export statements for db {} table {}", dbName, tableName);
- processCommands(exportEventStr, dbName, tableName, sourceStatement, sourceCleanUpList, false);
- if (!sourceCleanUpList.isEmpty()) {
- invokeCopy(sourceCleanUpList);
- }
- }
- } else if (conf.get(HiveDRArgs.EXECUTION_STAGE.getName())
- .equalsIgnoreCase(HiveDRUtils.ExecutionStage.IMPORT.name())) {
- importEventStr = readEvents(new Path(eventSplit[3]));
- if (StringUtils.isNotEmpty(importEventStr)) {
- LOG.info("Process the import statements for db {} table {}", dbName, tableName);
- processCommands(importEventStr, dbName, tableName, targetStatement, targetCleanUpList, true);
- }
- }
- }
-
- public List<ReplicationStatus> getListReplicationStatus() {
- return listReplicationStatus;
- }
-
- private void processCommands(String eventStr, String dbName, String tableName, Statement sqlStmt,
- List<Path> cleanUpList, boolean isImportStatements)
- throws SQLException, HiveReplicationException, IOException {
- String[] commandList = eventStr.split(DelimiterUtils.NEWLINE_DELIM);
- List<Command> deserializeCommand = new ArrayList<Command>();
- for (String command : commandList) {
- Command cmd = ReplicationUtils.deserializeCommand(command);
- deserializeCommand.add(cmd);
- List<String> cleanupLocations = cmd.cleanupLocationsAfterEvent();
- cleanUpList.addAll(getCleanUpPaths(cleanupLocations));
- }
- for (Command cmd : deserializeCommand) {
- try {
- LOG.debug("Executing command : {} : {} ", cmd.getEventId(), cmd.toString());
- executeCommand(cmd, dbName, tableName, sqlStmt, isImportStatements, 0);
- } catch (Exception e) {
- // clean up locations before failing.
- cleanupEventLocations(sourceCleanUpList, sourceFileSystem);
- cleanupEventLocations(targetCleanUpList, targetFileSystem);
- throw new HiveReplicationException("Could not process replication command for "
- + " DB Name:" + dbName + ", Table Name:" + tableName, e);
- }
- }
- }
-
- private void executeCommand(Command cmd, String dbName, String tableName,
- Statement sqlStmt, boolean isImportStatements, int attempt)
- throws HiveReplicationException, SQLException, IOException {
- for (final String stmt : cmd.get()) {
- executeSqlStatement(cmd, dbName, tableName, sqlStmt, stmt, isImportStatements, attempt);
- }
- if (isImportStatements) {
- addReplicationStatus(ReplicationStatus.Status.SUCCESS, dbName, tableName, cmd.getEventId());
- }
- }
-
- private void executeSqlStatement(Command cmd, String dbName, String tableName,
- Statement sqlStmt, String stmt, boolean isImportStatements, int attempt)
- throws HiveReplicationException, SQLException, IOException {
- try {
- sqlStmt.execute(stmt);
- } catch (SQLException sqeOuter) {
- // Retry if command is retriable.
- if (attempt < RETRY_ATTEMPTS && cmd.isRetriable()) {
- if (isImportStatements) {
- try {
- cleanupEventLocations(getCleanUpPaths(cmd.cleanupLocationsPerRetry()), targetFileSystem);
- } catch (IOException ioe) {
- // Clean up failed before retry on target. Update failure status and return
- addReplicationStatus(ReplicationStatus.Status.FAILURE, dbName,
- tableName, cmd.getEventId());
- throw ioe;
- }
- } else {
- cleanupEventLocations(getCleanUpPaths(cmd.cleanupLocationsPerRetry()), sourceFileSystem);
- }
- executeCommand(cmd, dbName, tableName, sqlStmt, isImportStatements, ++attempt);
- return; // Retry succeeded, return without throwing an exception.
- }
- // If we reached here, retries have failed.
- LOG.error("SQL Exception: {}", sqeOuter);
- undoCommand(cmd, dbName, tableName, sqlStmt, isImportStatements);
- if (isImportStatements) {
- addReplicationStatus(ReplicationStatus.Status.FAILURE, dbName, tableName, cmd.getEventId());
- }
- throw sqeOuter;
- }
- }
-
- private static List<Path> getCleanUpPaths(List<String> cleanupLocations) {
- List<Path> cleanupLocationPaths = new ArrayList<Path>();
- for (String cleanupLocation : cleanupLocations) {
- cleanupLocationPaths.add(new Path(cleanupLocation));
- }
- return cleanupLocationPaths;
- }
-
- private void undoCommand(Command cmd, String dbName,
- String tableName, Statement sqlStmt, boolean isImportStatements)
- throws SQLException, HiveReplicationException {
- if (cmd.isUndoable()) {
- try {
- List<String> undoCommands = cmd.getUndo();
- LOG.debug("Undo command: {}", StringUtils.join(undoCommands.toArray()));
- if (undoCommands.size() != 0) {
- for (final String undoStmt : undoCommands) {
- sqlStmt.execute(undoStmt);
- }
- }
- } catch (SQLException sqeInner) {
- if (isImportStatements) {
- addReplicationStatus(ReplicationStatus.Status.FAILURE, dbName,
- tableName, cmd.getEventId());
- }
- LOG.error("SQL Exception: {}", sqeInner);
- throw sqeInner;
- }
- }
- }
-
- private void addReplicationStatus(ReplicationStatus.Status status, String dbName, String tableName, long eventId)
- throws HiveReplicationException {
- try {
- String drJobName = conf.get(HiveDRArgs.JOB_NAME.getName());
- ReplicationStatus rs = new ReplicationStatus(conf.get(HiveDRArgs.SOURCE_CLUSTER.getName()),
- conf.get(HiveDRArgs.TARGET_CLUSTER.getName()), drJobName, dbName, tableName, status, eventId);
- listReplicationStatus.add(rs);
- } catch (HiveReplicationException hre) {
- throw new HiveReplicationException("Could not update replication status store for "
- + " EventId:" + eventId
- + " DB Name:" + dbName
- + " Table Name:" + tableName
- + hre.toString());
- }
- }
-
- public void invokeCopy(List<Path> srcStagingPaths) throws Exception {
- DistCpOptions options = getDistCpOptions(srcStagingPaths);
- DistCp distCp = new DistCp(conf, options);
- LOG.info("Started DistCp with source Path: {} \ttarget path: {}", StringUtils.join(srcStagingPaths.toArray()),
- fullyQualifiedTargetStagingPath);
- Job distcpJob = distCp.execute();
- LOG.info("Distp Hadoop job: {}", distcpJob.getJobID().toString());
- LOG.info("Completed DistCp");
- if (distcpJob.getStatus().getState() == JobStatus.State.SUCCEEDED) {
- countersMap = HiveDRUtils.fetchReplicationCounters(conf, distcpJob);
- }
- }
-
- public DistCpOptions getDistCpOptions(List<Path> srcStagingPaths) {
- /*
- * Add the fully qualified sourceNameNode to srcStagingPath uris. This will
- * ensure DistCp will succeed when the job is run on target cluster.
- */
- List<Path> fullyQualifiedSrcStagingPaths = new ArrayList<Path>();
- for (Path srcPath : srcStagingPaths) {
- fullyQualifiedSrcStagingPaths.add(new Path(sourceNN, srcPath.toString()));
- }
-
- DistCpOptions distcpOptions = new DistCpOptions(fullyQualifiedSrcStagingPaths,
- new Path(fullyQualifiedTargetStagingPath));
- /* setSyncFolder to false to retain the dir structure as in source at the target. If set to true, all files
- will be copied to the same staging dir at the target, resulting in DuplicateFileException in DistCp.
- */
-
- distcpOptions.setSyncFolder(false);
- distcpOptions.setBlocking(true);
- distcpOptions.setMaxMaps(Integer.valueOf(conf.get(HiveDRArgs.DISTCP_MAX_MAPS.getName())));
- distcpOptions.setMapBandwidth(Integer.valueOf(conf.get(HiveDRArgs.DISTCP_MAP_BANDWIDTH.getName())));
- return distcpOptions;
- }
-
- public Long getCounterValue(String counterKey) {
- return countersMap.get(counterKey);
- }
-
- public boolean isCountersMapEmpty() {
- return countersMap.isEmpty();
- }
-
- public void cleanEventsDirectory() throws IOException {
- LOG.info("Cleaning staging directory");
- cleanupEventLocations(sourceCleanUpList, sourceFileSystem);
- cleanupEventLocations(targetCleanUpList, targetFileSystem);
- }
-
- private void cleanupEventLocations(List<Path> cleanupList, FileSystem fileSystem)
- throws IOException {
- for (Path cleanUpPath : cleanupList) {
- try {
- fileSystem.delete(cleanUpPath, true);
- } catch (IOException ioe) {
- LOG.error("Cleaning up of staging directory {} failed {}", cleanUpPath, ioe.toString());
- throw ioe;
- }
- }
-
- }
-
- public void closeConnection() throws SQLException {
- if (sourceStatement != null) {
- sourceStatement.close();
- }
-
- if (targetStatement != null) {
- targetStatement.close();
- }
-
- if (sourceConnection != null) {
- sourceConnection.close();
- }
- if (targetConnection != null) {
- targetConnection.close();
- }
- }
-}
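A sketch of the per-stage lifecycle that CopyMapper drives through this class, under the assumption that conf carries the HiveDRArgs properties (HS2 URIs, namenodes, staging path, execution stage) and that eventLine is one mapper input record.

    import org.apache.falcon.hive.util.EventUtils;
    import org.apache.hadoop.conf.Configuration;

    final class StageRunnerSketch {
        static void runStage(Configuration conf, String eventLine) throws Exception {
            EventUtils eventUtils = new EventUtils(conf);
            eventUtils.setupConnection();  // JDBC to source or target HS2, by stage
            eventUtils.initializeFS();     // source, job and target FileSystems
            try {
                eventUtils.processEvents(eventLine);
            } finally {
                eventUtils.closeConnection();
            }
        }
    }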
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java
deleted file mode 100644
index 6bd6319..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/FileUtils.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * Utility class to validate HDFS files.
- */
-public final class FileUtils {
-
- public static final String DEFAULT_EVENT_STORE_PATH = DRStatusStore.BASE_DEFAULT_STORE_PATH
- + File.separator + "Events";
- public static final FsPermission FS_PERMISSION_700 = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
-
-
- private FileUtils() {}
-
- public static Configuration getConfiguration(final String writeEP, final String nnKerberosPrincipal) {
- Configuration conf = new Configuration();
- conf.set("fs.defaultFS", writeEP);
- if (StringUtils.isNotEmpty(nnKerberosPrincipal)) {
- conf.set("dfs.namenode.kerberos.principal", nnKerberosPrincipal);
- }
- return conf;
- }
-
- public static void validatePath(final FileSystem fileSystem, final Path basePath) throws IOException {
- if (!fileSystem.exists(basePath)) {
- throw new IOException("Please create base dir " + fileSystem.getUri() + basePath
- + ". Please set group to " + DRStatusStore.getStoreGroup()
- + " and permissions to " + DRStatusStore.DEFAULT_STORE_PERMISSION.toString());
- }
-
- if (!fileSystem.getFileStatus(basePath).getPermission().equals(DRStatusStore.DEFAULT_STORE_PERMISSION)
- || !fileSystem.getFileStatus(basePath).getGroup().equalsIgnoreCase(DRStatusStore.getStoreGroup())) {
- throw new IOException("Base dir " + fileSystem.getUri() + basePath
- + " does not have correct ownership/permissions."
- + " Please set group to " + DRStatusStore.getStoreGroup()
- + " and permissions to " + DRStatusStore.DEFAULT_STORE_PERMISSION.toString());
- }
-
- }
-}
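A minimal usage sketch, assuming an insecure cluster (null principal) and an illustrative namenode URI; validatePath fails fast if the base dir is missing or mis-owned.

    import org.apache.falcon.hive.util.DRStatusStore;
    import org.apache.falcon.hive.util.FileUtils;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class BaseDirCheckSketch {
        public static void main(String[] args) throws Exception {
            // Principal is only needed on secure clusters; pass null otherwise.
            FileSystem fs = FileSystem.get(
                    FileUtils.getConfiguration("hdfs://target:8020", null));
            // Throws a descriptive IOException if the base dir is absent or
            // lacks the expected group/permissions.
            FileUtils.validatePath(fs, new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH));
        }
    }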
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java
deleted file mode 100644
index 900afe8..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRStatusStore.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**
- * DRStatusStore implementation for hive.
- */
-public class HiveDRStatusStore extends DRStatusStore {
-
- private static final Logger LOG = LoggerFactory.getLogger(DRStatusStore.class);
- private FileSystem fileSystem;
-
- private static final String DEFAULT_STORE_PATH = BASE_DEFAULT_STORE_PATH + "hiveReplicationStatusStore/";
- private static final FsPermission DEFAULT_STATUS_DIR_PERMISSION =
- new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
-
- private static final String LATEST_FILE = "latest.json";
- private static final int FILE_ROTATION_LIMIT = 10;
- private static final int FILE_ROTATION_TIME = 86400000; // 1 day
-
-
- public HiveDRStatusStore(FileSystem targetFileSystem) throws IOException {
- init(targetFileSystem);
- }
-
- public HiveDRStatusStore(FileSystem targetFileSystem, String group) throws IOException {
- HiveDRStatusStore.setStoreGroup(group);
- init(targetFileSystem);
- }
-
- private void init(FileSystem targetFileSystem) throws IOException {
- this.fileSystem = targetFileSystem;
- Path basePath = new Path(BASE_DEFAULT_STORE_PATH);
- FileUtils.validatePath(fileSystem, basePath);
-
- Path storePath = new Path(DEFAULT_STORE_PATH);
- if (!fileSystem.exists(storePath)) {
- if (!FileSystem.mkdirs(fileSystem, storePath, DEFAULT_STORE_PERMISSION)) {
- throw new IOException("mkdir failed for " + DEFAULT_STORE_PATH);
- }
- } else {
- if (!fileSystem.getFileStatus(storePath).getPermission().equals(DEFAULT_STORE_PERMISSION)) {
- throw new IOException("Base dir " + DEFAULT_STORE_PATH + " does not have correct permissions. "
- + "Please set to 777");
- }
- }
- }
-
- /**
- Get all DBs updated by the job and all current table statuses for each DB, then merge the latest
- repl status with the previous table repl statuses. If all statuses are success, store the DB status
- as success with the largest eventId; else store the DB status as failure with the lowest eventId.
- */
- @Override
- public void updateReplicationStatus(String jobName, List<ReplicationStatus> statusList)
- throws HiveReplicationException {
- Map<String, DBReplicationStatus> dbStatusMap = new HashMap<String, DBReplicationStatus>();
- for (ReplicationStatus status : statusList) {
- if (!status.getJobName().equals(jobName)) {
- String error = "JobName for status does not match current job \"" + jobName
- + "\". Status is " + status.toJsonString();
- LOG.error(error);
- throw new HiveReplicationException(error);
- }
-
- // init dbStatusMap and tableStatusMap from existing statuses.
- if (!dbStatusMap.containsKey(status.getDatabase())) {
- DBReplicationStatus dbStatus = getDbReplicationStatus(status.getSourceUri(), status.getTargetUri(),
- status.getJobName(), status.getDatabase());
- dbStatusMap.put(status.getDatabase(), dbStatus);
- }
-
- // update existing statuses with new status for db/tables
- if (StringUtils.isEmpty(status.getTable())) { // db level replication status.
- dbStatusMap.get(status.getDatabase()).updateDbStatus(status);
- } else { // table level replication status
- dbStatusMap.get(status.getDatabase()).updateTableStatus(status);
- }
- }
- // write to disk
- for (Map.Entry<String, DBReplicationStatus> entry : dbStatusMap.entrySet()) {
- writeStatusFile(entry.getValue());
- }
- }
-
- @Override
- public ReplicationStatus getReplicationStatus(String source, String target, String jobName, String database)
- throws HiveReplicationException {
- return getReplicationStatus(source, target, jobName, database, null);
- }
-
-
- public ReplicationStatus getReplicationStatus(String source, String target,
- String jobName, String database,
- String table) throws HiveReplicationException {
- if (StringUtils.isEmpty(table)) {
- return getDbReplicationStatus(source, target, jobName, database).getDatabaseStatus();
- } else {
- return getDbReplicationStatus(source, target, jobName, database).getTableStatus(table);
- }
- }
-
- @Override
- public Iterator<ReplicationStatus> getTableReplicationStatusesInDb(String source, String target,
- String jobName, String database)
- throws HiveReplicationException {
- DBReplicationStatus dbReplicationStatus = getDbReplicationStatus(source, target, jobName, database);
- return dbReplicationStatus.getTableStatusIterator();
- }
-
- @Override
- public void deleteReplicationStatus(String jobName, String database) throws HiveReplicationException {
- Path deletePath = getStatusDirPath(database, jobName);
- try {
- if (fileSystem.exists(deletePath)) {
- fileSystem.delete(deletePath, true);
- }
- } catch (IOException e) {
- throw new HiveReplicationException("Failed to delete status for Job "
- + jobName + " and DB "+ database, e);
- }
-
- }
-
- private DBReplicationStatus getDbReplicationStatus(String source, String target, String jobName,
- String database) throws HiveReplicationException {
- DBReplicationStatus dbReplicationStatus = null;
- Path statusDirPath = getStatusDirPath(database, jobName);
- // Check whether the database name or jobName contains chars not allowed in hdfs dir/file names;
- // if so, use an md5 of the name for dir names. Prefer actual db names for readability.
-
- try {
- if (fileSystem.exists(statusDirPath)) {
- dbReplicationStatus = readStatusFile(statusDirPath);
- }
- if (null == dbReplicationStatus) {
- // Init replication state for this database
- ReplicationStatus initDbStatus = new ReplicationStatus(source, target, jobName,
- database, null, ReplicationStatus.Status.INIT, -1);
- dbReplicationStatus = new DBReplicationStatus(initDbStatus);
- if (!FileSystem.mkdirs(fileSystem, statusDirPath, DEFAULT_STATUS_DIR_PERMISSION)) {
- String error = "mkdir failed for " + statusDirPath.toString();
- LOG.error(error);
- throw new HiveReplicationException(error);
- }
- writeStatusFile(dbReplicationStatus);
- }
- return dbReplicationStatus;
- } catch (IOException e) {
- String error = "Failed to get ReplicationStatus for job " + jobName;
- LOG.error(error);
- throw new HiveReplicationException(error, e);
- }
- }
-
- private Path getStatusDirPath(DBReplicationStatus dbReplicationStatus) {
- ReplicationStatus status = dbReplicationStatus.getDatabaseStatus();
- return getStatusDirPath(status.getDatabase(), status.getJobName());
- }
-
- public Path getStatusDirPath(String database, String jobName) {
- return new Path(DEFAULT_STORE_PATH + "/" + database.toLowerCase() + "/" + jobName);
- }
-
- private void writeStatusFile(DBReplicationStatus dbReplicationStatus) throws HiveReplicationException {
- dbReplicationStatus.updateDbStatusFromTableStatuses();
- String statusDir = getStatusDirPath(dbReplicationStatus).toString();
- try {
- Path latestFile = new Path(statusDir + "/" + LATEST_FILE);
- if (fileSystem.exists(latestFile)) {
- Path renamedFile = new Path(statusDir + "/"
- + String.valueOf(fileSystem.getFileStatus(latestFile).getModificationTime()) + ".json");
- fileSystem.rename(latestFile, renamedFile);
- }
-
- FSDataOutputStream stream = FileSystem.create(fileSystem, latestFile, DEFAULT_STATUS_DIR_PERMISSION);
- stream.write(dbReplicationStatus.toJsonString().getBytes());
- stream.close();
-
- } catch (IOException e) {
- String error = "Failed to write latest Replication status into dir " + statusDir;
- LOG.error(error);
- throw new HiveReplicationException(error, e);
- }
-
- rotateStatusFiles(new Path(statusDir), FILE_ROTATION_LIMIT, FILE_ROTATION_TIME);
- }
-
- public void rotateStatusFiles(Path statusDir, int numFiles, int maxFileAge) throws HiveReplicationException {
-
- List<String> fileList = new ArrayList<String>();
- long now = System.currentTimeMillis();
- try {
- RemoteIterator<LocatedFileStatus> fileIterator = fileSystem.listFiles(statusDir, false);
- while (fileIterator.hasNext()) {
- fileList.add(fileIterator.next().getPath().toString());
- }
- if (fileList.size() > (numFiles + 1)) {
- // delete some files, as long as they are older than the time.
- Collections.sort(fileList);
- for (String file : fileList.subList(0, (fileList.size() - numFiles + 1))) {
- long modTime = fileSystem.getFileStatus(new Path(file)).getModificationTime();
- if ((now - modTime) > maxFileAge) {
- Path deleteFilePath = new Path(file);
- if (fileSystem.exists(deleteFilePath)) {
- fileSystem.delete(deleteFilePath, false);
- }
- }
- }
- }
- } catch (IOException e) {
- String error = "Failed to rotate status files in dir " + statusDir.toString();
- LOG.error(error);
- throw new HiveReplicationException(error, e);
- }
- }
-
- private DBReplicationStatus readStatusFile(Path statusDirPath) throws HiveReplicationException {
- try {
- Path statusFile = new Path(statusDirPath.toString() + "/" + LATEST_FILE);
- if ((!fileSystem.exists(statusDirPath)) || (!fileSystem.exists(statusFile))) {
- return null;
- } else {
- return new DBReplicationStatus(IOUtils.toString(fileSystem.open(statusFile)));
- }
- } catch (IOException e) {
- String error = "Failed to read latest Replication status from dir " + statusDirPath.toString();
- LOG.error(error);
- throw new HiveReplicationException(error, e);
- }
- }
-
- public void checkForReplicationConflict(String newSource, String jobName,
- String database, String table) throws HiveReplicationException {
- try {
- Path globPath = new Path(DEFAULT_STORE_PATH + "/" + database.toLowerCase() + "/*/latest.json");
- FileStatus[] files = fileSystem.globStatus(globPath);
- for(FileStatus file : files) {
- DBReplicationStatus dbFileStatus = new DBReplicationStatus(IOUtils.toString(
- fileSystem.open(file.getPath())));
- ReplicationStatus existingJob = dbFileStatus.getDatabaseStatus();
-
- if (!(newSource.equals(existingJob.getSourceUri()))) {
- throw new HiveReplicationException("Two different sources are attempting to replicate to same db "
- + database + ". New Source = " + newSource
- + ", Existing Source = " + existingJob.getSourceUri());
- } // two different sources replicating to same DB. Conflict
- if (jobName.equals(existingJob.getJobName())) {
- continue;
- } // same job, no conflict.
-
- if (StringUtils.isEmpty(table)) {
- // When it is DB level replication, two different jobs cannot replicate to same DB
- throw new HiveReplicationException("Two different jobs are attempting to replicate to same db "
- + database.toLowerCase() + ". New Job = " + jobName
- + ", Existing Job = " + existingJob.getJobName());
- }
-
- /*
- At this point, it is different table level jobs replicating from same newSource to same target. This is
- allowed as long as the target tables are different. For example, job1 can replicate db1.table1 and
- job2 can replicate db1.table2. Both jobs cannot replicate to same table.
- */
- for(Map.Entry<String, ReplicationStatus> entry : dbFileStatus.getTableStatuses().entrySet()) {
- if (table.equals(entry.getKey())) {
- throw new HiveReplicationException("Two different jobs are trying to replicate to same table "
- + entry.getKey() + ". New job = " + jobName
- + ", Existing job = " + existingJob.getJobName());
- }
- }
- }
- } catch (IOException e) {
- throw new HiveReplicationException("Failed to read status files for DB "
- + database, e);
- }
- }
-}
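A sketch of the conflict pre-check a scheduler might run before submitting a new job; the URIs, job, database, and table names are illustrative.

    import org.apache.falcon.hive.util.FileUtils;
    import org.apache.falcon.hive.util.HiveDRStatusStore;
    import org.apache.hadoop.fs.FileSystem;

    public final class ConflictCheckSketch {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(
                    FileUtils.getConfiguration("hdfs://target:8020", null));
            HiveDRStatusStore store = new HiveDRStatusStore(fs);
            // Throws HiveReplicationException if another source replicates into
            // salesdb, or another job already replicates salesdb.orders.
            store.checkForReplicationConflict(
                    "hdfs://source:8020", "drJob", "salesdb", "orders");
        }
    }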
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRUtils.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRUtils.java
deleted file mode 100644
index dff0803..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveDRUtils.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.falcon.job.JobCounters;
-import org.apache.falcon.job.JobCountersHandler;
-import org.apache.falcon.job.JobType;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.util.Shell;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Hive replication utility class.
- */
-public final class HiveDRUtils {
- /**
- * Enum for Hive replication type.
- */
- public enum ReplicationType {
- TABLE,
- DB
- }
-
- /**
- * Enum for hive-dr action type.
- */
- public enum ExecutionStage {
- IMPORT,
- EXPORT,
- LASTEVENTS
- }
-
- private static final String ALL_TABLES = "*";
-
- public static final String SEPARATOR = File.separator;
-
- private HiveDRUtils() {}
-
- public static ReplicationType getReplicationType(List<String> sourceTables) {
- return (sourceTables.size() == 1 && sourceTables.get(0).equals(ALL_TABLES)) ? ReplicationType.DB
- : ReplicationType.TABLE;
- }
-
- public static Configuration getDefaultConf() throws IOException {
- Configuration conf = new Configuration();
- conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));
- String delegationToken = getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION");
- if (delegationToken != null) {
- conf.set("mapreduce.job.credentials.binary", delegationToken);
- conf.set("tez.credentials.path", delegationToken);
- }
- return conf;
- }
-
- public static String getFilePathFromEnv(String env) {
- String path = System.getenv(env);
- if (path != null && Shell.WINDOWS) {
- // In Windows, file paths are enclosed in \" so remove them here
- // to avoid path errors
- if (path.charAt(0) == '"') {
- path = path.substring(1);
- }
- if (path.charAt(path.length() - 1) == '"') {
- path = path.substring(0, path.length() - 1);
- }
- }
- return path;
- }
-
- public static Map<String, Long> fetchReplicationCounters(Configuration conf,
- Job job) throws IOException, InterruptedException {
- JobCounters hiveReplicationCounters = JobCountersHandler.getCountersType(
- JobType.HIVEREPLICATION.name());
- hiveReplicationCounters.obtainJobCounters(conf, job, true);
- return hiveReplicationCounters.getCountersMap();
- }
-}
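
A short usage sketch for the utility above. The single "*" wildcard convention
is taken directly from getReplicationType; the table names are illustrative:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.falcon.hive.util.HiveDRUtils;

    public class HiveDRUtilsExample {
        public static void main(String[] args) {
            // A single "*" entry means the whole database is replicated (DB level);
            // any explicit table list means table-level replication.
            List<String> wholeDb = Arrays.asList("*");
            List<String> someTables = Arrays.asList("clicks", "impressions");

            System.out.println(HiveDRUtils.getReplicationType(wholeDb));    // DB
            System.out.println(HiveDRUtils.getReplicationType(someTables)); // TABLE

            // Note: getDefaultConf() assumes an Oozie action environment, where
            // oozie.action.conf.xml and HADOOP_TOKEN_FILE_LOCATION are set.
        }
    }
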
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveMetastoreUtils.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveMetastoreUtils.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveMetastoreUtils.java
deleted file mode 100644
index ea19f09..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/HiveMetastoreUtils.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory;
-import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-
-/**
- * Create hive metastore client for user.
- */
-public final class HiveMetastoreUtils {
-
- private static final Logger LOG = LoggerFactory.getLogger(HiveMetastoreUtils.class);
-
- private HiveMetastoreUtils() {}
-
- public static HCatClient initializeHiveMetaStoreClient(String metastoreUri, String metastorePrincipal,
- String hive2Principal) throws Exception {
- try {
- HiveConf hcatConf = createHiveConf(HiveDRUtils.getDefaultConf(),
- metastoreUri, metastorePrincipal, hive2Principal);
- HCatClient client = HCatClient.create(hcatConf);
- return client;
- } catch (IOException e) {
- throw new Exception("Exception creating HCatClient: " + e.getMessage(), e);
- }
- }
-
- private static HiveConf createHiveConf(Configuration conf, String metastoreUrl, String metastorePrincipal,
- String hive2Principal) throws IOException {
- JobConf jobConf = new JobConf(conf);
- String delegationToken = HiveDRUtils.getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION");
- if (delegationToken != null) {
- Credentials credentials = Credentials.readTokenStorageFile(new File(delegationToken), conf);
- jobConf.setCredentials(credentials);
- UserGroupInformation.getCurrentUser().addCredentials(credentials);
- }
-
- HiveConf hcatConf = new HiveConf(jobConf, HiveConf.class);
-
- hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUrl);
- hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
- hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
- HCatSemanticAnalyzer.class.getName());
- hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-
- hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
- hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
- hcatConf.set(HiveConf.ConfVars.HIVE_REPL_TASK_FACTORY.varname, EximReplicationTaskFactory.class.getName());
- if (StringUtils.isNotEmpty(metastorePrincipal)) {
- hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, metastorePrincipal);
- hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
- hcatConf.set(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
- hcatConf.set("hadoop.rpc.protection", "authentication");
- }
- if (StringUtils.isNotEmpty(hive2Principal)) {
- hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname, hive2Principal);
- hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, "kerberos");
- }
-
- return hcatConf;
- }
-
-}
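
A minimal usage sketch for the client factory above. The metastore URI and
principals are placeholders; on a non-secure cluster the principal arguments
can be left empty, in which case the Kerberos settings are skipped:

    import org.apache.falcon.hive.util.HiveMetastoreUtils;
    import org.apache.hive.hcatalog.api.HCatClient;

    public class MetastoreClientExample {
        public static void main(String[] args) throws Exception {
            HCatClient client = HiveMetastoreUtils.initializeHiveMetaStoreClient(
                    "thrift://metastore.example.com:9083", // placeholder URI
                    "hive/_HOST@EXAMPLE.COM",              // metastore principal (secure clusters)
                    "hive/_HOST@EXAMPLE.COM");             // HiveServer2 principal (secure clusters)
            System.out.println(client.getDatabase("default").getName());
            client.close();
        }
    }
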
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/java/org/apache/falcon/hive/util/ReplicationStatus.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/ReplicationStatus.java b/addons/hivedr/src/main/java/org/apache/falcon/hive/util/ReplicationStatus.java
deleted file mode 100644
index bb33772..0000000
--- a/addons/hivedr/src/main/java/org/apache/falcon/hive/util/ReplicationStatus.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive.util;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-
-/**
- * Object to store replication status of a DB or a table.
- */
-public class ReplicationStatus {
-
- public static final int INDENT_FACTOR = 4;
- private static final String SOURCE = "sourceUri";
- private static final String TARGET = "targetUri";
- private static final String JOB_NAME = "jobName";
- private static final String DATABASE = "database";
- private static final String TABLE = "table";
- private static final String EVENT_ID = "eventId";
- private static final String STATUS_KEY = "status";
- private static final String STATUS_LOG = "statusLog";
-
- /**
- * Replication Status enum.
- */
- public static enum Status {
- INIT,
- SUCCESS,
- FAILURE
- }
-
- private String sourceUri;
- private String targetUri;
- private String jobName;
- private String database;
- private String table;
- private Status status = Status.SUCCESS;
- private long eventId = -1;
- private String log;
-
- //SUSPEND CHECKSTYLE CHECK ParameterNumberCheck
- public ReplicationStatus(String sourceUri, String targetUri, String jobName,
- String database, String table,
- ReplicationStatus.Status status, long eventId) throws HiveReplicationException {
- init(sourceUri, targetUri, jobName, database, table, status, eventId, null);
- }
-
- private void init(String source, String target, String job,
- String dbName, String tableName, ReplicationStatus.Status replStatus,
- long eventNum, String logStr) throws HiveReplicationException {
- setSourceUri(source);
- setTargetUri(target);
- setJobName(job);
- setDatabase(dbName);
- setTable(tableName);
- setStatus(replStatus);
- setEventId(eventNum);
- setLog(logStr);
- }
- //RESUME CHECKSTYLE CHECK ParameterNumberCheck
-
- public ReplicationStatus(String jsonString) throws HiveReplicationException {
- try {
- JSONObject object = new JSONObject(jsonString);
- Status objectStatus;
- try {
- objectStatus = ReplicationStatus.Status.valueOf(object.getString(STATUS_KEY).toUpperCase());
- } catch (IllegalArgumentException e1) {
- throw new HiveReplicationException("Unable to deserialize jsonString to ReplicationStatus."
- + " Invalid status " + object.getString(STATUS_KEY), e1);
- }
-
- init(object.getString(SOURCE), object.getString(TARGET), object.getString(JOB_NAME),
- object.getString(DATABASE), object.has(TABLE) ? object.getString(TABLE) : null,
- objectStatus, object.has(EVENT_ID) ? object.getLong(EVENT_ID) : -1,
- object.has(STATUS_LOG) ? object.getString(STATUS_LOG) : null);
- } catch (JSONException e) {
- throw new HiveReplicationException("Unable to deserialize jsonString to ReplicationStatus ", e);
- }
-
- }
-
- public String toJsonString() throws HiveReplicationException {
- try {
- return toJsonObject().toString(INDENT_FACTOR);
- } catch (JSONException e) {
- throw new HiveReplicationException("Unable to serialize ReplicationStatus ", e);
- }
- }
-
- public JSONObject toJsonObject() throws HiveReplicationException {
- JSONObject jsonObject = new JSONObject();
- try {
- jsonObject.put(SOURCE, this.sourceUri);
- jsonObject.put(TARGET, this.targetUri);
- jsonObject.put(JOB_NAME, this.jobName);
- jsonObject.put(DATABASE, this.database);
- if (StringUtils.isNotEmpty(this.table)) {
- jsonObject.put(TABLE, this.table);
- }
- jsonObject.put(STATUS_KEY, this.status.name());
- if (this.eventId > -1) {
- jsonObject.put(EVENT_ID, this.eventId);
- } else {
- jsonObject.put(EVENT_ID, -1);
- }
- if (StringUtils.isNotEmpty(this.log)) {
- jsonObject.put(STATUS_LOG, this.log);
- }
- return jsonObject;
- } catch (JSONException e) {
- throw new HiveReplicationException("Unable to serialize ReplicationStatus ", e);
- }
- }
-
- public String getSourceUri() {
- return this.sourceUri;
- }
-
- public void setSourceUri(String source) throws HiveReplicationException {
- validateString(SOURCE, source);
- this.sourceUri = source;
- }
-
- public String getTargetUri() {
- return this.targetUri;
- }
-
- public void setTargetUri(String target) throws HiveReplicationException {
- validateString(TARGET, target);
- this.targetUri = target;
- }
-
- public String getJobName() {
- return this.jobName;
- }
-
- public void setJobName(String jobName) throws HiveReplicationException {
- validateString(JOB_NAME, jobName);
- this.jobName = jobName;
- }
-
- public String getDatabase() {
- return this.database;
- }
-
- public void setDatabase(String database) throws HiveReplicationException {
- validateString(DATABASE, database);
- this.database = database.toLowerCase();
- }
-
- public String getTable() {
- return this.table;
- }
-
- public void setTable(String table) {
- this.table = (table == null) ? null : table.toLowerCase();
- }
-
- public Status getStatus() {
- return this.status;
- }
-
- public void setStatus(Status status) throws HiveReplicationException {
- if (status != null) {
- this.status = status;
- } else {
- throw new HiveReplicationException("Failed to set ReplicationStatus. Input \""
- + STATUS_KEY + "\" cannot be empty");
- }
- }
-
- public long getEventId() {
- return this.eventId;
- }
-
- public void setEventId(long eventId) throws HiveReplicationException {
- if (eventId > -1) {
- this.eventId = eventId;
- }
- }
-
- public String getLog() {
- return this.log;
- }
-
- public void setLog(String log) {
- this.log = log;
- }
-
- private void validateString(String inputName, String input) throws HiveReplicationException {
- if (StringUtils.isEmpty(input)) {
- throw new HiveReplicationException("Failed to set ReplicationStatus. Input \""
- + inputName + "\" cannot be empty");
- }
- }
-
- public String toString() {
- return sourceUri + "\t" + targetUri + "\t" + jobName + "\t"
- + database + "\t"+ table + "\t" + status + "\t"+ eventId;
- }
-
-}
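
A usage sketch showing the JSON round trip the class above supports, using only
constructors and methods defined in the file; the URIs and names are
illustrative:

    import org.apache.falcon.hive.util.ReplicationStatus;

    public class ReplicationStatusExample {
        public static void main(String[] args) throws Exception {
            ReplicationStatus status = new ReplicationStatus(
                    "hive2://source:10000", "hive2://target:10000",
                    "drJob", "SalesDB", "Orders",
                    ReplicationStatus.Status.SUCCESS, 42L);

            // Serialize to JSON and parse back with the String constructor.
            String json = status.toJsonString();
            ReplicationStatus parsed = new ReplicationStatus(json);

            // Database and table names are normalized to lower case by the setters.
            System.out.println(parsed.getDatabase() + "." + parsed.getTable()
                    + " @ event " + parsed.getEventId()); // salesdb.orders @ event 42
        }
    }
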
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/main/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/main/resources/log4j.xml b/addons/hivedr/src/main/resources/log4j.xml
deleted file mode 100644
index f83a9a9..0000000
--- a/addons/hivedr/src/main/resources/log4j.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<!--
- This is used for falcon packaging only.
- -->
-
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
- <appender name="console" class="org.apache.log4j.ConsoleAppender">
- <param name="Target" value="System.out"/>
- <layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
- </layout>
- </appender>
-
- <logger name="org.apache.falcon" additivity="false">
- <level value="debug"/>
- <appender-ref ref="console"/>
- </logger>
-
- <logger name="org.apache.hadoop" additivity="false">
- <level value="info"/>
- <appender-ref ref="console"/>
- </logger>
-
- <logger name="org.apache.hadoop.hive" additivity="false">
- <level value="info"/>
- <appender-ref ref="console"/>
- </logger>
-
- <root>
- <priority value="info"/>
- <appender-ref ref="console"/>
- </root>
-
-</log4j:configuration>
[7/7] falcon git commit: Removing addons/ non-docs directory from
asf-site branch
Posted by pa...@apache.org.
Removing addons/ non-docs directory from asf-site branch
Project: http://git-wip-us.apache.org/repos/asf/falcon/repo
Commit: http://git-wip-us.apache.org/repos/asf/falcon/commit/6f5b476c
Tree: http://git-wip-us.apache.org/repos/asf/falcon/tree/6f5b476c
Diff: http://git-wip-us.apache.org/repos/asf/falcon/diff/6f5b476c
Branch: refs/heads/asf-site
Commit: 6f5b476ccd8fa4ff1e9aea36d44a85309a9b932e
Parents: 8609ffd
Author: Pallavi Rao <pa...@inmobi.com>
Authored: Tue Mar 1 12:54:02 2016 +0530
Committer: Pallavi Rao <pa...@inmobi.com>
Committed: Tue Mar 1 12:54:02 2016 +0530
----------------------------------------------------------------------
addons/adf/README | 59 --
addons/adf/pom.xml | 112 ---
.../apache/falcon/adfservice/ADFHiveJob.java | 123 ----
.../org/apache/falcon/adfservice/ADFJob.java | 556 ---------------
.../apache/falcon/adfservice/ADFJobFactory.java | 43 --
.../org/apache/falcon/adfservice/ADFPigJob.java | 70 --
.../falcon/adfservice/ADFProviderService.java | 370 ----------
.../falcon/adfservice/ADFReplicationJob.java | 71 --
.../falcon/adfservice/ADFScheduledExecutor.java | 71 --
.../org/apache/falcon/adfservice/DataFeed.java | 110 ---
.../java/org/apache/falcon/adfservice/Feed.java | 39 -
.../org/apache/falcon/adfservice/Process.java | 148 ----
.../org/apache/falcon/adfservice/TableFeed.java | 125 ----
.../adfservice/util/ADFJsonConstants.java | 73 --
.../apache/falcon/adfservice/util/FSUtils.java | 102 ---
addons/designer/actions/pom.xml | 46 --
.../configuration/EmailActionConfiguration.java | 74 --
.../designer/primitive/action/EmailAction.java | 92 ---
addons/designer/checkstyle/pom.xml | 28 -
.../resources/falcon/checkstyle-java-header.txt | 17 -
.../resources/falcon/checkstyle-noframes.xsl | 218 ------
.../src/main/resources/falcon/checkstyle.xml | 233 ------
.../main/resources/falcon/findbugs-exclude.xml | 34 -
addons/designer/common/pom.xml | 42 --
addons/designer/core/pom.xml | 81 ---
.../configuration/ActionConfiguration.java | 32 -
.../designer/configuration/Configuration.java | 81 ---
.../designer/configuration/FlowConfig.java | 69 --
.../designer/configuration/SerdeException.java | 61 --
.../configuration/TransformConfiguration.java | 33 -
.../falcon/designer/primitive/Action.java | 102 ---
.../apache/falcon/designer/primitive/Code.java | 27 -
.../primitive/CompilationException.java | 60 --
.../falcon/designer/primitive/Message.java | 67 --
.../falcon/designer/primitive/Primitive.java | 159 -----
.../falcon/designer/primitive/Transform.java | 88 ---
.../falcon/designer/schema/RelationalData.java | 53 --
.../designer/schema/RelationalSchema.java | 84 ---
.../falcon/designer/source/DataSource.java | 29 -
.../apache/falcon/designer/storage/Storage.java | 67 --
.../designer/storage/StorageException.java | 63 --
.../falcon/designer/storage/Storeable.java | 52 --
.../apache/falcon/designer/storage/Version.java | 71 --
.../designer/storage/VersionedStorage.java | 111 ---
.../designer/storage/impl/HDFSStorage.java | 98 ---
.../designer/storage/impl/HDFSStorageTest.java | 78 --
addons/designer/flows/pom.xml | 46 --
.../apache/falcon/designer/primitive/Flow.java | 83 ---
addons/designer/pom.xml | 709 -------------------
addons/designer/transforms/pom.xml | 42 --
addons/designer/ui/pom.xml | 95 ---
.../designer/ui/src/main/webapp/WEB-INF/web.xml | 49 --
addons/hivedr/README | 80 ---
addons/hivedr/pom.xml | 209 ------
.../apache/falcon/hive/DefaultPartitioner.java | 317 ---------
.../org/apache/falcon/hive/EventSourcer.java | 31 -
.../java/org/apache/falcon/hive/HiveDRArgs.java | 122 ----
.../org/apache/falcon/hive/HiveDROptions.java | 183 -----
.../java/org/apache/falcon/hive/HiveDRTool.java | 393 ----------
.../falcon/hive/LastReplicatedEvents.java | 196 -----
.../falcon/hive/MetaStoreEventSourcer.java | 204 ------
.../org/apache/falcon/hive/Partitioner.java | 42 --
.../falcon/hive/ReplicationEventMetadata.java | 34 -
.../exception/HiveReplicationException.java | 49 --
.../falcon/hive/mapreduce/CopyCommitter.java | 65 --
.../falcon/hive/mapreduce/CopyMapper.java | 104 ---
.../falcon/hive/mapreduce/CopyReducer.java | 85 ---
.../falcon/hive/util/DBReplicationStatus.java | 213 ------
.../apache/falcon/hive/util/DRStatusStore.java | 104 ---
.../apache/falcon/hive/util/DelimiterUtils.java | 30 -
.../falcon/hive/util/EventSourcerUtils.java | 189 -----
.../org/apache/falcon/hive/util/EventUtils.java | 393 ----------
.../org/apache/falcon/hive/util/FileUtils.java | 68 --
.../falcon/hive/util/HiveDRStatusStore.java | 315 --------
.../apache/falcon/hive/util/HiveDRUtils.java | 99 ---
.../falcon/hive/util/HiveMetastoreUtils.java | 92 ---
.../falcon/hive/util/ReplicationStatus.java | 221 ------
addons/hivedr/src/main/resources/log4j.xml | 54 --
.../falcon/hive/DBReplicationStatusTest.java | 230 ------
.../java/org/apache/falcon/hive/DRTest.java | 45 --
.../falcon/hive/HiveDRStatusStoreTest.java | 343 ---------
.../java/org/apache/falcon/hive/HiveDRTest.java | 252 -------
.../falcon/hive/ReplicationStatusTest.java | 137 ----
addons/recipes/hdfs-replication/README.txt | 29 -
addons/recipes/hdfs-replication/pom.xml | 32 -
.../resources/hdfs-replication-template.xml | 44 --
.../resources/hdfs-replication-workflow.xml | 82 ---
.../main/resources/hdfs-replication.properties | 79 ---
.../recipes/hive-disaster-recovery/README.txt | 58 --
addons/recipes/hive-disaster-recovery/pom.xml | 32 -
.../hive-disaster-recovery-secure-template.xml | 45 --
.../hive-disaster-recovery-secure-workflow.xml | 357 ----------
.../hive-disaster-recovery-secure.properties | 108 ---
.../hive-disaster-recovery-template.xml | 45 --
.../hive-disaster-recovery-workflow.xml | 249 -------
.../resources/hive-disaster-recovery.properties | 98 ---
96 files changed, 11603 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/README
----------------------------------------------------------------------
diff --git a/addons/adf/README b/addons/adf/README
deleted file mode 100644
index 39883b8..0000000
--- a/addons/adf/README
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ADF Provider
-=======================
-
-
-Overview
----------
-
-This integration allows Microsoft Azure Data Factory pipelines to invoke Falcon activities
-(i.e. replication, hive and pig processing work), so the user can build hybrid Hadoop data pipelines
-leveraging on-premises Hadoop clusters and cloud-based Cortana Analytics services
-like HDInsight Hadoop clusters and Azure Machine Learning.
-
-
-Usage
----------
-
-Falcon reads Azure Service Bus credentials from conf/startup.properties when it starts.
-So the credentials need to be added before starting Falcon,
-and Falcon must be restarted whenever the credentials change.
-
-Example:
-
-######### ADF Configurations start #########
-
-# A String object that represents the namespace
-*.microsoft.windowsazure.services.servicebus.namespace=hwtransport
-
-# Request and status queues on the namespace
-*.microsoft.windowsazure.services.servicebus.requestqueuename=adfrequest
-*.microsoft.windowsazure.services.servicebus.statusqueuename=adfstatus
-
-# A String object that contains the SAS key name
-*.microsoft.windowsazure.services.servicebus.sasKeyName=RootManageSharedAccessKey
-
-# A String object that contains the SAS key
-*.microsoft.windowsazure.services.servicebus.sasKey=4kt2x6yEoWZZSFZofyXEoxly0knHL7FPMqLD14ov1jo=
-
-# A String object containing the base URI that is added to your Service Bus namespace to form the URI to connect
-# to the Service Bus service. To access the default public Azure service, pass ".servicebus.windows.net"
-*.microsoft.windowsazure.services.servicebus.serviceBusRootUri=.servicebus.windows.net
-
-# Service bus polling frequency (in seconds)
-*.microsoft.windowsazure.services.servicebus.polling.frequency=60
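
For illustration only, the settings above can be read back with standard
java.util.Properties; this sketch is hypothetical and is not Falcon's actual
configuration loader:

    import java.io.FileInputStream;
    import java.util.Properties;

    public class ServiceBusConfigExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            try (FileInputStream in = new FileInputStream("conf/startup.properties")) {
                props.load(in);
            }
            // The leading "*." is part of the key, exactly as written above.
            String namespace = props.getProperty(
                    "*.microsoft.windowsazure.services.servicebus.namespace");
            String requestQueue = props.getProperty(
                    "*.microsoft.windowsazure.services.servicebus.requestqueuename");
            System.out.println("Polling queue " + requestQueue
                    + " in namespace " + namespace);
        }
    }
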
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/pom.xml
----------------------------------------------------------------------
diff --git a/addons/adf/pom.xml b/addons/adf/pom.xml
deleted file mode 100644
index 898791e..0000000
--- a/addons/adf/pom.xml
+++ /dev/null
@@ -1,112 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-main</artifactId>
- <version>0.10-SNAPSHOT</version>
- <relativePath>../../pom.xml</relativePath>
- </parent>
- <artifactId>falcon-adf</artifactId>
- <description>Apache Falcon ADF Integration</description>
- <name>Apache Falcon ADF Integration</name>
- <packaging>jar</packaging>
-
- <properties>
- <azure.version>0.8.0</azure.version>
- </properties>
-
- <dependencies>
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-common</artifactId>
- <exclusions>
- <exclusion>
- <groupId>javax.servlet.jsp</groupId>
- <artifactId>jsp-api</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-prism</artifactId>
- <version>${project.version}</version>
- <classifier>classes</classifier>
- </dependency>
-
- <dependency>
- <groupId>com.microsoft.azure</groupId>
- <artifactId>azure-servicebus</artifactId>
- <version>${azure.version}</version>
- <exclusions>
- <exclusion>
- <groupId>org.codehaus.jackson</groupId>
- <artifactId>jackson-core-asl</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.codehaus.jackson</groupId>
- <artifactId>jackson-mapper-asl</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpclient</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- </dependencies>
-
- <profiles>
- <profile>
- <id>hadoop-2</id>
- <activation>
- <activeByDefault>true</activeByDefault>
- </activation>
- <dependencies>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-client</artifactId>
- </dependency>
- </dependencies>
- </profile>
- </profiles>
-
- <build>
- <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
- <!--<testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>-->
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-jar-plugin</artifactId>
- <executions>
- <execution>
- <goals>
- <goal>test-jar</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFHiveJob.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFHiveJob.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFHiveJob.java
deleted file mode 100644
index 6412c73..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFHiveJob.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.adfservice.util.ADFJsonConstants;
-import org.apache.falcon.FalconException;
-import org.json.JSONException;
-import org.json.JSONObject;
-
-/**
- * Azure ADF Hive Job.
- */
-public class ADFHiveJob extends ADFJob {
- private static final String HIVE_SCRIPT_EXTENSION = ".hql";
- private static final String ENGINE_TYPE = "hive";
- private static final String INPUT_FEED_SUFFIX = "-hive-input-feed";
- private static final String OUTPUT_FEED_SUFFIX = "-hive-output-feed";
-
- private String hiveScriptPath;
- private TableFeed inputFeed;
- private TableFeed outputFeed;
-
- public ADFHiveJob(String message, String id) throws FalconException {
- super(message, id);
- type = JobType.HIVE;
- inputFeed = getInputTableFeed();
- outputFeed = getOutputTableFeed();
- hiveScriptPath = activityHasScriptPath() ? getScriptPath() : createScriptFile(HIVE_SCRIPT_EXTENSION);
- }
-
- @Override
- public void startJob() throws FalconException {
- startProcess(inputFeed, outputFeed, ENGINE_TYPE, hiveScriptPath);
- }
-
- @Override
- public void cleanup() throws FalconException {
- cleanupProcess(inputFeed, outputFeed);
- }
-
- private TableFeed getInputTableFeed() throws FalconException {
- return getTableFeed(jobEntityName() + INPUT_FEED_SUFFIX, getInputTables().get(0),
- getTableCluster(getInputTables().get(0)));
- }
-
- private TableFeed getOutputTableFeed() throws FalconException {
- return getTableFeed(jobEntityName() + OUTPUT_FEED_SUFFIX, getOutputTables().get(0),
- getTableCluster(getOutputTables().get(0)));
- }
-
- private TableFeed getTableFeed(final String feedName, final String tableName,
- final String clusterName) throws FalconException {
- JSONObject tableExtendedProperties = getTableExtendedProperties(tableName);
- String tableFeedName;
- String partitions;
-
- try {
- tableFeedName = tableExtendedProperties.getString(ADFJsonConstants.ADF_REQUEST_TABLE_NAME);
- if (StringUtils.isBlank(tableFeedName)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_TABLE_NAME + " cannot"
- + " be empty in ADF request.");
- }
- partitions = tableExtendedProperties.getString(ADFJsonConstants.ADF_REQUEST_TABLE_PARTITION);
- if (StringUtils.isBlank(partitions)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_TABLE_PARTITION + " cannot"
- + " be empty in ADF request.");
- }
- } catch (JSONException e) {
- throw new FalconException("Error while parsing ADF JSON message: " + tableExtendedProperties, e);
- }
-
- return new TableFeed.Builder().withFeedName(feedName).withFrequency(frequency)
- .withClusterName(clusterName).withStartTime(startTime).withEndTime(endTime)
- .withAclOwner(proxyUser).withTableName(tableFeedName).withPartitions(partitions).build();
- }
-
- private JSONObject getTableExtendedProperties(final String tableName) throws FalconException {
- JSONObject table = tablesMap.get(tableName);
- if (table == null) {
- throw new FalconException("JSON object table " + tableName + " not found in ADF request.");
- }
-
- try {
- JSONObject tableProperties = table.getJSONObject(ADFJsonConstants.ADF_REQUEST_PROPERTIES);
- if (tableProperties == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_PROPERTIES
- + " not found in ADF request.");
- }
- JSONObject tablesLocation = tableProperties.getJSONObject(ADFJsonConstants.ADF_REQUEST_LOCATION);
- if (tablesLocation == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_LOCATION
- + " not found in ADF request.");
- }
-
- JSONObject tableExtendedProperties = tablesLocation.getJSONObject(ADFJsonConstants.
- ADF_REQUEST_EXTENDED_PROPERTIES);
- if (tableExtendedProperties == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_EXTENDED_PROPERTIES
- + " not found in ADF request.");
- }
- return tableExtendedProperties;
- } catch (JSONException e) {
- throw new FalconException("Error while parsing ADF JSON message: " + table, e);
- }
- }
-}
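
The nested lookups in getTableExtendedProperties above expect each table entry
to nest properties -> location -> extendedProperties. A small org.json sketch
of that shape (the key strings here are illustrative; the real ones are defined
in ADFJsonConstants):

    import org.json.JSONObject;

    public class TableJsonShapeExample {
        public static void main(String[] args) throws Exception {
            // properties -> location -> extendedProperties, the path the code walks.
            JSONObject extended = new JSONObject()
                    .put("tableName", "sales_table")  // illustrative key names; the real
                    .put("partition", "year=2016");   // ones live in ADFJsonConstants
            JSONObject table = new JSONObject().put("properties",
                    new JSONObject().put("location",
                            new JSONObject().put("extendedProperties", extended)));

            String tableName = table.getJSONObject("properties")
                    .getJSONObject("location")
                    .getJSONObject("extendedProperties")
                    .getString("tableName");
            System.out.println(tableName); // sales_table
        }
    }
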
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJob.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJob.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJob.java
deleted file mode 100644
index 5d81338..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJob.java
+++ /dev/null
@@ -1,556 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.falcon.adfservice.util.ADFJsonConstants;
-import org.apache.falcon.adfservice.util.FSUtils;
-import org.apache.falcon.entity.EntityUtil;
-import org.apache.falcon.entity.v0.Entity;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.resource.AbstractSchedulableEntityManager;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.hadoop.fs.Path;
-import org.json.JSONArray;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Base class for Azure ADF jobs.
- */
-public abstract class ADFJob {
- private static final Logger LOG = LoggerFactory.getLogger(ADFJob.class);
-
- // name prefix for all ADF-related entities, e.g. an ADF hive process and the feeds associated with it
- public static final String ADF_ENTITY_NAME_PREFIX = "ADF-";
- public static final int ADF_ENTITY_NAME_PREFIX_LENGTH = ADF_ENTITY_NAME_PREFIX.length();
- // name prefix for all ADF-related job entities, i.e. ADF hive/pig processes and replication feeds
- public static final String ADF_JOB_ENTITY_NAME_PREFIX = ADF_ENTITY_NAME_PREFIX + "JOB-";
- public static final int ADF_JOB_ENTITY_NAME_PREFIX_LENGTH = ADF_JOB_ENTITY_NAME_PREFIX.length();
-
- public static final String TEMPLATE_PATH_PREFIX = "/apps/falcon/adf/";
- public static final String PROCESS_SCRIPTS_PATH = TEMPLATE_PATH_PREFIX
- + Path.SEPARATOR + "generatedscripts";
- private static final String DEFAULT_FREQUENCY = "days(1)";
-
- public static boolean isADFJobEntity(String entityName) {
- return entityName.startsWith(ADF_JOB_ENTITY_NAME_PREFIX);
- }
-
- public static String getSessionID(String entityName) throws FalconException {
- if (!isADFJobEntity(entityName)) {
- throw new FalconException("The entity, " + entityName + ", is not an ADF Job Entity.");
- }
- return entityName.substring(ADF_JOB_ENTITY_NAME_PREFIX_LENGTH);
- }
-
- /**
- * Enum for job type.
- */
- public static enum JobType {
- HIVE, PIG, REPLICATION
- }
-
- private static enum RequestType {
- HADOOPMIRROR, HADOOPHIVE, HADOOPPIG
- }
-
- public static JobType getJobType(String msg) throws FalconException {
- try {
- JSONObject obj = new JSONObject(msg);
- JSONObject activity = obj.getJSONObject(ADFJsonConstants.ADF_REQUEST_ACTIVITY);
- if (activity == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_ACTIVITY + " not found in ADF"
- + " request.");
- }
-
- JSONObject activityProperties = activity.getJSONObject(ADFJsonConstants.ADF_REQUEST_TRANSFORMATION);
- if (activityProperties == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_TRANSFORMATION + " not found "
- + "in ADF request.");
- }
-
- String type = activityProperties.getString(ADFJsonConstants.ADF_REQUEST_TYPE);
- if (StringUtils.isBlank(type)) {
- throw new FalconException(ADFJsonConstants.ADF_REQUEST_TYPE + " not found in ADF request msg");
- }
-
- switch (RequestType.valueOf(type.toUpperCase())) {
- case HADOOPMIRROR:
- return JobType.REPLICATION;
- case HADOOPHIVE:
- return JobType.HIVE;
- case HADOOPPIG:
- return JobType.PIG;
- default:
- throw new FalconException("Unrecognized ADF job type: " + type);
- }
- } catch (JSONException e) {
- throw new FalconException("Error while parsing ADF JSON message: " + msg, e);
- }
- }
-
- public abstract void startJob() throws FalconException;
- public abstract void cleanup() throws FalconException;
-
- protected JSONObject message;
- protected JSONObject activity;
- protected JSONObject activityExtendedProperties;
- protected String id;
- protected JobType type;
- protected String startTime, endTime;
- protected String frequency;
- protected String proxyUser;
- protected long timeout;
- protected ADFJobManager jobManager = new ADFJobManager();
-
- private Map<String, JSONObject> linkedServicesMap = new HashMap<String, JSONObject>();
- protected Map<String, JSONObject> tablesMap = new HashMap<String, JSONObject>();
-
- public ADFJob(String msg, String id) throws FalconException {
- this.id = id;
- FSUtils.createDir(new Path(PROCESS_SCRIPTS_PATH));
- try {
- message = new JSONObject(msg);
-
- frequency = DEFAULT_FREQUENCY;
- startTime = transformTimeFormat(message.getString(ADFJsonConstants.ADF_REQUEST_START_TIME));
- endTime = transformTimeFormat(message.getString(ADFJsonConstants.ADF_REQUEST_END_TIME));
-
- JSONArray linkedServices = message.getJSONArray(ADFJsonConstants.ADF_REQUEST_LINKED_SERVICES);
- for (int i = 0; i < linkedServices.length(); i++) {
- JSONObject linkedService = linkedServices.getJSONObject(i);
- linkedServicesMap.put(linkedService.getString(ADFJsonConstants.ADF_REQUEST_NAME), linkedService);
- }
-
- JSONArray tables = message.getJSONArray(ADFJsonConstants.ADF_REQUEST_TABLES);
- for (int i = 0; i < tables.length(); i++) {
- JSONObject table = tables.getJSONObject(i);
- tablesMap.put(table.getString(ADFJsonConstants.ADF_REQUEST_NAME), table);
- }
-
- // Set the activity extended properties
- activity = message.getJSONObject(ADFJsonConstants.ADF_REQUEST_ACTIVITY);
- if (activity == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_ACTIVITY + " not found in ADF"
- + " request.");
- }
-
- JSONObject policy = activity.getJSONObject(ADFJsonConstants.ADF_REQUEST_POLICY);
- // policy is mandatory; fail the request if it is absent
- if (policy == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_POLICY + " not found"
- + " in ADF request.");
- }
- String adfTimeout = policy.getString(ADFJsonConstants.ADF_REQUEST_TIMEOUT);
- if (StringUtils.isBlank(adfTimeout)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_TIMEOUT + " not found"
- + " in ADF request.");
- }
- timeout = parseADFRequestTimeout(adfTimeout);
-
- JSONObject activityProperties = activity.getJSONObject(ADFJsonConstants.ADF_REQUEST_TRANSFORMATION);
- if (activityProperties == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_TRANSFORMATION + " not found"
- + " in ADF request.");
- }
-
- activityExtendedProperties = activityProperties.getJSONObject(
- ADFJsonConstants.ADF_REQUEST_EXTENDED_PROPERTIES);
- if (activityExtendedProperties == null) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_EXTENDED_PROPERTIES + " not"
- + " found in ADF request.");
- }
-
- // should be called after setting activityExtendedProperties
- proxyUser = getRunAsUser();
-
- // log in the user
- CurrentUser.authenticate(proxyUser);
- } catch (JSONException e) {
- throw new FalconException("Error while parsing ADF JSON message: " + msg, e);
- }
- }
-
- public String jobEntityName() {
- return ADF_JOB_ENTITY_NAME_PREFIX + id;
- }
-
- public JobType jobType() {
- return type;
- }
-
- protected String getClusterName(String linkedServiceName) throws FalconException {
- JSONObject linkedService = linkedServicesMap.get(linkedServiceName);
- if (linkedService == null) {
- throw new FalconException("Linked service " + linkedServiceName + " not found in ADF request.");
- }
-
- try {
- return linkedService.getJSONObject(ADFJsonConstants.ADF_REQUEST_PROPERTIES)
- .getJSONObject(ADFJsonConstants.ADF_REQUEST_EXTENDED_PROPERTIES)
- .getString(ADFJsonConstants.ADF_REQUEST_CLUSTER_NAME);
- } catch (JSONException e) {
- throw new FalconException("Error while parsing linked service " + linkedServiceName + " in ADF request.");
- }
- }
-
- protected String getRunAsUser() throws FalconException {
- if (activityExtendedProperties.has(ADFJsonConstants.ADF_REQUEST_RUN_ON_BEHALF_USER)) {
- String runAsUser = null;
- try {
- runAsUser = activityExtendedProperties.getString(ADFJsonConstants.ADF_REQUEST_RUN_ON_BEHALF_USER);
- } catch (JSONException e) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_RUN_ON_BEHALF_USER + " not"
- + " found in ADF request.");
- }
-
- if (StringUtils.isBlank(runAsUser)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_RUN_ON_BEHALF_USER + " in"
- + " ADF request activity extended properties cannot be empty.");
- }
- return runAsUser;
- } else {
- String hadoopLinkedService = getHadoopLinkedService();
- JSONObject linkedService = linkedServicesMap.get(hadoopLinkedService);
- if (linkedService == null) {
- throw new FalconException("JSON object " + hadoopLinkedService + " not"
- + " found in ADF request.");
- }
-
- try {
- return linkedService.getJSONObject(ADFJsonConstants.ADF_REQUEST_PROPERTIES)
- .getJSONObject(ADFJsonConstants.ADF_REQUEST_EXTENDED_PROPERTIES)
- .getString(ADFJsonConstants.ADF_REQUEST_RUN_ON_BEHALF_USER);
- } catch (JSONException e) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_RUN_ON_BEHALF_USER + " not"
- + " found in ADF request.");
- }
- }
- }
-
- protected List<String> getInputTables() throws FalconException {
- List<String> tables = new ArrayList<String>();
- try {
- JSONArray inputs = message.getJSONObject(ADFJsonConstants.ADF_REQUEST_ACTIVITY)
- .getJSONArray(ADFJsonConstants.ADF_REQUEST_INPUTS);
- for (int i = 0; i < inputs.length(); i++) {
- tables.add(inputs.getJSONObject(i).getString(ADFJsonConstants.ADF_REQUEST_NAME));
- }
- } catch (JSONException e) {
- throw new FalconException("Error while reading input table names in ADF request.");
- }
- return tables;
- }
-
- protected List<String> getOutputTables() throws FalconException {
- List<String> tables = new ArrayList<String>();
- try {
- JSONArray outputs = message.getJSONObject(ADFJsonConstants.ADF_REQUEST_ACTIVITY)
- .getJSONArray(ADFJsonConstants.ADF_REQUEST_OUTPUTS);
- for (int i = 0; i < outputs.length(); i++) {
- tables.add(outputs.getJSONObject(i).getString(ADFJsonConstants.ADF_REQUEST_NAME));
- }
- } catch (JSONException e) {
- throw new FalconException("Error while reading output table names in ADF request.");
- }
- return tables;
- }
-
- protected String getADFTablePath(String tableName) throws FalconException {
- JSONObject table = tablesMap.get(tableName);
- if (table == null) {
- throw new FalconException("JSON object " + tableName + " not"
- + " found in ADF request.");
- }
-
- try {
- JSONObject location = table.getJSONObject(ADFJsonConstants.ADF_REQUEST_PROPERTIES)
- .getJSONObject(ADFJsonConstants.ADF_REQUEST_LOCATION);
- String requestType = location.getString(ADFJsonConstants.ADF_REQUEST_TYPE);
- if (requestType.equals(ADFJsonConstants.ADF_REQUEST_LOCATION_TYPE_AZURE_BLOB)) {
- String blobPath = location.getString(ADFJsonConstants.ADF_REQUEST_FOLDER_PATH);
- int index = blobPath.indexOf('/');
- if (index == -1) {
- throw new FalconException("Invalid azure blob path: " + blobPath);
- }
-
- String linkedServiceName = location.getString(ADFJsonConstants.ADF_REQUEST_LINKED_SERVICE_NAME);
- JSONObject linkedService = linkedServicesMap.get(linkedServiceName);
- if (linkedService == null) {
- throw new FalconException("Can't find linked service " + linkedServiceName + " for azure blob");
- }
- String connectionString = linkedService.getJSONObject(ADFJsonConstants.ADF_REQUEST_PROPERTIES)
- .getString(ADFJsonConstants.ADF_REQUEST_CONNECTION_STRING);
- int accountNameIndex = connectionString.indexOf(ADFJsonConstants.ADF_REQUEST_BLOB_ACCOUNT_NAME)
- + ADFJsonConstants.ADF_REQUEST_BLOB_ACCOUNT_NAME.length();
- String accountName = connectionString.substring(accountNameIndex,
- connectionString.indexOf(';', accountNameIndex));
-
- StringBuilder blobUrl = new StringBuilder("wasb://")
- .append(blobPath.substring(0, index)).append("@")
- .append(accountName).append(".blob.core.windows.net")
- .append(blobPath.substring(index));
- return blobUrl.toString();
- }
- return location.getJSONObject(ADFJsonConstants.ADF_REQUEST_EXTENDED_PROPERTIES)
- .getString(ADFJsonConstants.ADF_REQUEST_FOLDER_PATH);
- } catch (JSONException e) {
- throw new FalconException("Error while parsing ADF JSON message: " + tableName, e);
- }
- }
-
- protected String getTableCluster(String tableName) throws FalconException {
- JSONObject table = tablesMap.get(tableName);
- if (table == null) {
- throw new FalconException("Table " + tableName + " not found in ADF request.");
- }
-
- try {
- String linkedServiceName = table.getJSONObject(ADFJsonConstants.ADF_REQUEST_PROPERTIES)
- .getJSONObject(ADFJsonConstants.ADF_REQUEST_LOCATION)
- .getString(ADFJsonConstants.ADF_REQUEST_LINKED_SERVICE_NAME);
- return getClusterName(linkedServiceName);
- } catch (JSONException e) {
- throw new FalconException("Error while parsing table cluster " + tableName + " in ADF request.");
- }
- }
-
- protected boolean activityHasScriptPath() throws FalconException {
- if (JobType.REPLICATION == jobType()) {
- return false;
- }
-
- return activityExtendedProperties.has(
- ADFJsonConstants.ADF_REQUEST_SCRIPT_PATH);
- }
-
- protected String getScriptPath() throws FalconException {
- if (!activityHasScriptPath()) {
- throw new FalconException("JSON object does not have object: "
- + ADFJsonConstants.ADF_REQUEST_SCRIPT_PATH);
- }
-
- try {
- String scriptPath = activityExtendedProperties.getString(ADFJsonConstants.ADF_REQUEST_SCRIPT_PATH);
- if (StringUtils.isBlank(scriptPath)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_SCRIPT_PATH + " not"
- + " found or empty in ADF request.");
- }
- return scriptPath;
- } catch (JSONException jsonException) {
- throw new FalconException("Error while parsing ADF JSON object: "
- + activityExtendedProperties, jsonException);
- }
- }
-
- protected String getScriptContent() throws FalconException {
- if (activityHasScriptPath()) {
- throw new FalconException("JSON object does not have object: " + ADFJsonConstants.ADF_REQUEST_SCRIPT);
- }
- try {
- String script = activityExtendedProperties.getString(ADFJsonConstants.ADF_REQUEST_SCRIPT);
- if (StringUtils.isBlank(script)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_SCRIPT + " cannot"
- + " be empty in ADF request.");
- }
- return script;
- } catch (JSONException jsonException) {
- throw new FalconException("Error while parsing ADF JSON object: "
- + activityExtendedProperties, jsonException);
- }
- }
-
- protected String getClusterNameToRunProcessOn() throws FalconException {
- return getClusterName(getHadoopLinkedService());
- }
-
- protected Entity submitAndScheduleJob(String entityType, String msg) throws FalconException {
- Entity entity = jobManager.submitJob(entityType, msg);
- jobManager.scheduleJob(entityType, jobEntityName());
- return entity;
- }
-
- private String getHadoopLinkedService() throws FalconException {
- String hadoopLinkedService;
- try {
- hadoopLinkedService = activity.getString(ADFJsonConstants.ADF_REQUEST_LINKED_SERVICE_NAME);
- } catch (JSONException jsonException) {
- throw new FalconException("Error while parsing ADF JSON object: "
- + activity, jsonException);
- }
-
- if (StringUtils.isBlank(hadoopLinkedService)) {
- throw new FalconException("JSON object " + ADFJsonConstants.ADF_REQUEST_LINKED_SERVICE_NAME
- + " in the activity cannot be empty in ADF request.");
- }
- return hadoopLinkedService;
- }
-
- protected void startProcess(Feed inputFeed, Feed outputFeed,
- String engineType, String scriptPath) throws FalconException {
- // submit input/output feeds
- LOG.info("submitting input feed {} for {} process", inputFeed.getName(), engineType);
- jobManager.submitJob(EntityType.FEED.name(), inputFeed.getEntityxml());
-
- LOG.info("submitting output feed {} for {} process", outputFeed.getName(), engineType);
- jobManager.submitJob(EntityType.FEED.name(), outputFeed.getEntityxml());
-
- // submit and schedule process
- String processRequest = new Process.Builder().withProcessName(jobEntityName()).withFrequency(frequency)
- .withStartTime(startTime).withEndTime(endTime).withClusterName(getClusterNameToRunProcessOn())
- .withInputFeedName(inputFeed.getName()).withOutputFeedName(outputFeed.getName())
- .withEngineType(engineType).withWFPath(scriptPath).withAclOwner(proxyUser)
- .build().getEntityxml();
-
- LOG.info("submitting/scheduling {} process: {}", engineType, processRequest);
- submitAndScheduleJob(EntityType.PROCESS.name(), processRequest);
- LOG.info("submitted and scheduled {} process: {}", engineType, jobEntityName());
- }
-
- protected void cleanupProcess(Feed inputFeed, Feed outputFeed) throws FalconException {
- // delete the entities. Should be called after the job execution success/failure.
- jobManager.deleteEntity(EntityType.PROCESS.name(), jobEntityName());
- jobManager.deleteEntity(EntityType.FEED.name(), inputFeed.getName());
- jobManager.deleteEntity(EntityType.FEED.name(), outputFeed.getName());
-
- // delete script file
- FSUtils.removeDir(new Path(ADFJob.PROCESS_SCRIPTS_PATH, jobEntityName()));
- }
-
- protected String createScriptFile(String fileExt) throws FalconException {
- String content = getScriptContent();
-
- // create dir; dir path is unique as job name is always unique
- final Path dir = new Path(ADFJob.PROCESS_SCRIPTS_PATH, jobEntityName());
- FSUtils.createDir(dir);
-
- // create script file
- final Path path = new Path(dir, jobEntityName() + fileExt);
- return FSUtils.createFile(path, content);
- }
-
- private static long parseADFRequestTimeout(String timeout) throws FalconException {
- timeout = timeout.trim();
- // [ws][-]{ d | d.hh:mm[:ss[.ff]] | hh:mm[:ss[.ff]] }[ws]
- if (timeout.startsWith("-")) {
- return -1;
- }
-
- long totalMinutes = 0;
- String [] dotParts = timeout.split(Pattern.quote("."));
- if (dotParts.length == 1) {
- // no d or ff
- // check if only d
- // Formats can be d|hh:mm[:ss]
- String[] parts = timeout.split(":");
- if (parts.length == 1) {
- // only day. Convert days to minutes
- return Integer.parseInt(parts[0]) * 1440;
- } else {
- // hh:mm[:ss]
- return computeMinutes(parts);
- }
- }
-
- // if . is present, formats can be d.hh:mm[:ss[.ff]] | hh:mm[:ss[.ff]]
- if (dotParts.length == 2) {
- // can be d.hh:mm[:ss] or hh:mm[:ss[.ff]]
- // check if first part has colons
- String [] parts = dotParts[0].split(":");
- if (parts.length == 1) {
- // format is d.hh:mm[:ss]
- totalMinutes = Integer.parseInt(dotParts[0]) * 1440;
- parts = dotParts[1].split(":");
- totalMinutes += computeMinutes(parts);
- return totalMinutes;
- } else {
- // format is hh:mm[:ss[.ff]]
- parts = dotParts[0].split(":");
- totalMinutes += computeMinutes(parts);
- // round off ff
- totalMinutes += 1;
- return totalMinutes;
- }
- } else if (dotParts.length == 3) {
- // will be d.hh:mm[:ss[.ff]]
- totalMinutes = Integer.parseInt(dotParts[0]) * 1440;
- String [] parts = dotParts[1].split(":");
- totalMinutes += computeMinutes(parts);
- // round off ff
- totalMinutes += 1;
- return totalMinutes;
- } else {
- throw new FalconException("Error parsing policy timeout: " + timeout);
- }
- }
-
- // format hh:mm[:ss]
- private static long computeMinutes(String[] parts) {
- // hh:mm[:ss]
- int totalMinutes = Integer.parseInt(parts[0]) * 60;
- totalMinutes += Integer.parseInt(parts[1]);
- if (parts.length == 3) {
- // Second round off to minutes
- totalMinutes += 1;
- }
- return totalMinutes;
- }
-
- private static String transformTimeFormat(String adfTime) {
- return adfTime.substring(0, adfTime.length()-4) + "Z";
- }
-
- protected class ADFJobManager extends AbstractSchedulableEntityManager {
- public Entity submitJob(String entityType, String msg) throws FalconException {
- try {
- InputStream stream = IOUtils.toInputStream(msg);
- Entity entity = submitInternal(stream, entityType, proxyUser);
- return entity;
- } catch (Exception e) {
- LOG.error("Error when submitting job", e);
- throw new FalconException("Error when submitting job: " + e.toString(), e);
- }
- }
-
- public void scheduleJob(String entityType, String entityName) throws FalconException {
- try {
- scheduleInternal(entityType, entityName, null, EntityUtil.getPropertyMap(null));
- } catch (Exception e) {
- LOG.error("Error when scheduling job", e);
- throw new FalconException("Error when scheduling job: " + e.toString(), e);
- }
- }
-
- public void deleteEntity(String entityType, String entityName) throws FalconException {
- delete(entityType, entityName, null);
- }
- }
-}
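
To make the timeout grammar above concrete, here is a standalone sketch of the
hh:mm[:ss] conversion, mirroring computeMinutes (any seconds component rounds
up by one whole minute, as in the original):

    public class TimeoutMinutesSketch {
        // hh:mm[:ss] -> minutes; a seconds component rounds up one whole minute
        static long minutes(String hhmmss) {
            String[] parts = hhmmss.split(":");
            long total = Integer.parseInt(parts[0]) * 60L + Integer.parseInt(parts[1]);
            if (parts.length == 3) {
                total += 1; // round seconds up, as the original does
            }
            return total;
        }

        public static void main(String[] args) {
            System.out.println(minutes("02:30"));            // 150
            System.out.println(minutes("02:30:15"));         // 151
            System.out.println(2 * 1440 + minutes("01:00")); // "2.01:00" -> 2940
        }
    }
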
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJobFactory.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJobFactory.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJobFactory.java
deleted file mode 100644
index ceea6a4..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFJobFactory.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Azure ADF job factory that generates an ADFJob for each job type.
- */
-public final class ADFJobFactory {
- public static ADFJob buildADFJob(String msg, String id) throws FalconException {
- ADFJob.JobType jobType = ADFJob.getJobType(msg);
- switch (jobType) {
- case REPLICATION:
- return new ADFReplicationJob(msg, id);
- case HIVE:
- return new ADFHiveJob(msg, id);
- case PIG:
- return new ADFPigJob(msg, id);
- default:
- throw new FalconException("Invalid job type: " + jobType.toString());
- }
- }
-
- private ADFJobFactory() {
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFPigJob.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFPigJob.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFPigJob.java
deleted file mode 100644
index 041eb48..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFPigJob.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Azure ADF Pig Job.
- */
-public class ADFPigJob extends ADFJob {
- private static final String PIG_SCRIPT_EXTENSION = ".pig";
- private static final String ENGINE_TYPE = "pig";
- private static final String INPUT_FEED_SUFFIX = "-pig-input-feed";
- private static final String OUTPUT_FEED_SUFFIX = "-pig-output-feed";
-
- private String pigScriptPath;
- private DataFeed inputDataFeed;
- private DataFeed outputDataFeed;
-
- public ADFPigJob(String message, String id) throws FalconException {
- super(message, id);
- type = JobType.PIG;
- inputDataFeed = getInputFeed();
- outputDataFeed = getOutputFeed();
- pigScriptPath = activityHasScriptPath() ? getScriptPath() : createScriptFile(PIG_SCRIPT_EXTENSION);
- }
-
- @Override
- public void startJob() throws FalconException {
- startProcess(inputDataFeed, outputDataFeed, ENGINE_TYPE, pigScriptPath);
- }
-
- @Override
- public void cleanup() throws FalconException {
- cleanupProcess(inputDataFeed, outputDataFeed);
- }
-
- private DataFeed getInputFeed() throws FalconException {
- return getFeed(jobEntityName() + INPUT_FEED_SUFFIX, getInputTables().get(0),
- getTableCluster(getInputTables().get(0)));
- }
-
- private DataFeed getOutputFeed() throws FalconException {
- return getFeed(jobEntityName() + OUTPUT_FEED_SUFFIX, getOutputTables().get(0),
- getTableCluster(getOutputTables().get(0)));
- }
-
- private DataFeed getFeed(final String feedName, final String tableName,
- final String clusterName) throws FalconException {
- return new DataFeed.Builder().withFeedName(feedName).withFrequency(frequency)
- .withClusterName(clusterName).withStartTime(startTime).withEndTime(endTime)
- .withAclOwner(proxyUser).withLocationPath(getADFTablePath(tableName)).build();
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFProviderService.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFProviderService.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFProviderService.java
deleted file mode 100644
index 3438b2f..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFProviderService.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import com.microsoft.windowsazure.Configuration;
-import com.microsoft.windowsazure.exception.ServiceException;
-import com.microsoft.windowsazure.services.servicebus.ServiceBusService;
-import com.microsoft.windowsazure.services.servicebus.models.BrokeredMessage;
-import com.microsoft.windowsazure.services.servicebus.models.ReceiveMessageOptions;
-import com.microsoft.windowsazure.services.servicebus.models.ReceiveMode;
-import com.microsoft.windowsazure.services.servicebus.models.ReceiveQueueMessageResult;
-import com.microsoft.windowsazure.services.servicebus.ServiceBusConfiguration;
-import com.microsoft.windowsazure.services.servicebus.ServiceBusContract;
-
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.falcon.adfservice.util.ADFJsonConstants;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.store.ConfigurationStore;
-import org.apache.falcon.entity.v0.EntityType;
-import org.apache.falcon.resource.AbstractInstanceManager;
-import org.apache.falcon.resource.InstancesResult;
-import org.apache.falcon.resource.InstancesResult.Instance;
-import org.apache.falcon.resource.InstancesResult.WorkflowStatus;
-import org.apache.falcon.security.CurrentUser;
-import org.apache.falcon.service.FalconService;
-import org.apache.falcon.service.Services;
-import org.apache.falcon.util.StartupProperties;
-import org.apache.falcon.workflow.WorkflowExecutionListener;
-import org.apache.falcon.workflow.WorkflowExecutionContext;
-import org.apache.falcon.workflow.WorkflowJobEndNotificationService;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Falcon ADF provider to handle requests from Azure Data Factory.
- */
-public class ADFProviderService implements FalconService, WorkflowExecutionListener {
-
- private static final Logger LOG = LoggerFactory.getLogger(ADFProviderService.class);
-
- /**
- * Constant for the service name.
- */
- public static final String SERVICE_NAME = ADFProviderService.class.getSimpleName();
-
- private static final int AZURE_SERVICEBUS_RECEIVEMESSAGEOPT_TIMEOUT = 60;
- // polling frequency in seconds
- private static final int AZURE_SERVICEBUS_DEFAULT_POLLING_FREQUENCY = 10;
-
- // Number of threads to handle ADF requests
- private static final int AZURE_SERVICEBUS_REQUEST_HANDLING_THREADS = 5;
-
- private static final String AZURE_SERVICEBUS_CONF_PREFIX = "microsoft.windowsazure.services.servicebus.";
- private static final String AZURE_SERVICEBUS_CONF_SASKEYNAME = "sasKeyName";
- private static final String AZURE_SERVICEBUS_CONF_SASKEY = "sasKey";
- private static final String AZURE_SERVICEBUS_CONF_SERVICEBUSROOTURI = "serviceBusRootUri";
- private static final String AZURE_SERVICEBUS_CONF_NAMESPACE = "namespace";
- private static final String AZURE_SERVICEBUS_CONF_POLLING_FREQUENCY = "polling.frequency";
- private static final String AZURE_SERVICEBUS_CONF_REQUEST_QUEUE_NAME = "requestqueuename";
- private static final String AZURE_SERVICEBUS_CONF_STATUS_QUEUE_NAME = "statusqueuename";
- private static final String AZURE_SERVICEBUS_CONF_SUPER_USER = "superuser";
-
- private static final ConfigurationStore STORE = ConfigurationStore.get();
-
- private ServiceBusContract service;
- private ScheduledExecutorService adfScheduledExecutorService;
- private ReceiveMessageOptions opts = ReceiveMessageOptions.DEFAULT;
- private ADFInstanceManager instanceManager = new ADFInstanceManager();
- private String requestQueueName;
- private String statusQueueName;
- private String superUser;
-
- @Override
- public String getName() {
- return SERVICE_NAME;
- }
-
- @Override
- public void init() throws FalconException {
- // read start up properties for adf configuration
- service = ServiceBusService.create(getServiceBusConfig());
-
- requestQueueName = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_REQUEST_QUEUE_NAME);
- if (StringUtils.isBlank(requestQueueName)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_REQUEST_QUEUE_NAME
- + " property not set in startup properties. Please add it.");
- }
- statusQueueName = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_STATUS_QUEUE_NAME);
- if (StringUtils.isBlank(statusQueueName)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_STATUS_QUEUE_NAME
- + " property not set in startup properties. Please add it.");
- }
-
- // init opts
- opts.setReceiveMode(ReceiveMode.PEEK_LOCK);
- opts.setTimeout(AZURE_SERVICEBUS_RECEIVEMESSAGEOPT_TIMEOUT);
-
- // restart handling
- superUser = StartupProperties.get().getProperty(
- AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_SUPER_USER);
- if (StringUtils.isBlank(superUser)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_SUPER_USER
- + " property not set in startup properties. Please add it.");
- }
- CurrentUser.authenticate(superUser);
- for (EntityType entityType : EntityType.values()) {
- Collection<String> entities = STORE.getEntities(entityType);
- for (String entityName : entities) {
- updateJobStatus(entityName, entityType.toString());
- }
- }
-
- Services.get().<WorkflowJobEndNotificationService>getService(
- WorkflowJobEndNotificationService.SERVICE_NAME).registerListener(this);
- adfScheduledExecutorService = new ADFScheduledExecutor(AZURE_SERVICEBUS_REQUEST_HANDLING_THREADS);
- adfScheduledExecutorService.scheduleWithFixedDelay(new HandleADFRequests(), 0, getDelay(), TimeUnit.SECONDS);
- LOG.info("Falcon ADFProvider service initialized");
- }
-
- private class HandleADFRequests implements Runnable {
-
- @Override
- public void run() {
- String sessionID = null;
- try {
- LOG.info("To read message from adf...");
- ReceiveQueueMessageResult resultQM =
- service.receiveQueueMessage(requestQueueName, opts);
- BrokeredMessage message = resultQM.getValue();
- if (message != null && message.getMessageId() != null) {
- sessionID = message.getReplyToSessionId();
- BufferedReader rd = new BufferedReader(
- new InputStreamReader(message.getBody()));
- StringBuilder sb = new StringBuilder();
- String line;
- while ((line = rd.readLine()) != null) {
- sb.append(line);
- }
- rd.close();
- String msg = sb.toString();
- LOG.info("ADF message: " + msg);
-
- service.deleteMessage(message);
-
- ADFJob job = ADFJobFactory.buildADFJob(msg, sessionID);
- job.startJob();
- } else {
- LOG.info("No message from adf");
- }
- } catch (FalconException e) {
- if (sessionID != null) {
- sendErrorMessage(sessionID, e.toString());
- }
- LOG.info(e.toString());
- } catch (ServiceException | IOException e) {
- LOG.info(e.toString());
- }
- }
- }
-
- private static Configuration getServiceBusConfig() throws FalconException {
- String namespace = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_NAMESPACE);
- if (StringUtils.isBlank(namespace)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_NAMESPACE
- + " property not set in startup properties. Please add it.");
- }
-
- String sasKeyName = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_SASKEYNAME);
- if (StringUtils.isBlank(sasKeyName)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_SASKEYNAME
- + " property not set in startup properties. Please add it.");
- }
-
- String sasKey = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_SASKEY);
- if (StringUtils.isBlank(sasKey)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_SASKEY
- + " property not set in startup properties. Please add it.");
- }
-
- String serviceBusRootUri = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_SERVICEBUSROOTURI);
- if (StringUtils.isBlank(serviceBusRootUri)) {
- throw new FalconException(AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_SERVICEBUSROOTURI
- + " property not set in startup properties. Please add it.");
- }
-
- LOG.info("namespace: {}, sas key name: {}, sas key: {}, root uri: {}",
- namespace, sasKeyName, sasKey, serviceBusRootUri);
- return ServiceBusConfiguration.configureWithSASAuthentication(namespace, sasKeyName, sasKey,
- serviceBusRootUri);
- }
-
-
- // gets delay in seconds
- private long getDelay() throws FalconException {
- String pollingFrequencyValue = StartupProperties.get().getProperty(AZURE_SERVICEBUS_CONF_PREFIX
- + AZURE_SERVICEBUS_CONF_POLLING_FREQUENCY);
- long pollingFrequency;
- try {
- pollingFrequency = (StringUtils.isNotEmpty(pollingFrequencyValue))
- ? Long.parseLong(pollingFrequencyValue) : AZURE_SERVICEBUS_DEFAULT_POLLING_FREQUENCY;
- } catch (NumberFormatException nfe) {
- throw new FalconException("Invalid value provided for startup property "
- + AZURE_SERVICEBUS_CONF_PREFIX + AZURE_SERVICEBUS_CONF_POLLING_FREQUENCY
- + ", please provide a valid long number", nfe);
- }
- return pollingFrequency;
- }
-
- @Override
- public void destroy() throws FalconException {
- Services.get().<WorkflowJobEndNotificationService>getService(
- WorkflowJobEndNotificationService.SERVICE_NAME).unregisterListener(this);
- adfScheduledExecutorService.shutdown();
- }
-
- @Override
- public void onSuccess(WorkflowExecutionContext context) throws FalconException {
- updateJobStatus(context, ADFJsonConstants.ADF_STATUS_SUCCEEDED, 100);
- }
-
- @Override
- public void onFailure(WorkflowExecutionContext context) throws FalconException {
- updateJobStatus(context, ADFJsonConstants.ADF_STATUS_FAILED, 0);
- }
-
- @Override
- public void onStart(WorkflowExecutionContext context) throws FalconException {
- updateJobStatus(context, ADFJsonConstants.ADF_STATUS_EXECUTING, 0);
- }
-
- @Override
- public void onSuspend(WorkflowExecutionContext context) throws FalconException {
- updateJobStatus(context, ADFJsonConstants.ADF_STATUS_CANCELED, 0);
- }
-
- @Override
- public void onWait(WorkflowExecutionContext context) throws FalconException {
- updateJobStatus(context, ADFJsonConstants.ADF_STATUS_EXECUTING, 0);
- }
-
- private void updateJobStatus(String entityName, String entityType) throws FalconException {
- // Filter non-adf jobs
- if (!ADFJob.isADFJobEntity(entityName)) {
- return;
- }
-
- Instance instance = instanceManager.getFirstInstance(entityName, entityType);
- if (instance == null) {
- return;
- }
-
- WorkflowStatus workflowStatus = instance.getStatus();
- String status;
- int progress = 0;
- switch (workflowStatus) {
- case SUCCEEDED:
- progress = 100;
- status = ADFJsonConstants.ADF_STATUS_SUCCEEDED;
- break;
- case FAILED:
- case KILLED:
- case ERROR:
- case SKIPPED:
- case UNDEFINED:
- status = ADFJsonConstants.ADF_STATUS_FAILED;
- break;
- default:
- status = ADFJsonConstants.ADF_STATUS_EXECUTING;
- }
- updateJobStatus(entityName, status, progress, instance.getLogFile());
- }
-
- private void updateJobStatus(WorkflowExecutionContext context, String status, int progress) {
- // Filter non-adf jobs
- String entityName = context.getEntityName();
- if (!ADFJob.isADFJobEntity(entityName)) {
- return;
- }
-
- updateJobStatus(entityName, status, progress, context.getLogFile());
- }
-
- private void updateJobStatus(String entityName, String status, int progress, String logUrl) {
- try {
- String sessionID = ADFJob.getSessionID(entityName);
- LOG.info("To update job status: " + sessionID + ", " + entityName + ", " + status + ", " + logUrl);
- JSONObject obj = new JSONObject();
- obj.put(ADFJsonConstants.ADF_STATUS_PROTOCOL, ADFJsonConstants.ADF_STATUS_PROTOCOL_NAME);
- obj.put(ADFJsonConstants.ADF_STATUS_JOBID, sessionID);
- obj.put(ADFJsonConstants.ADF_STATUS_LOG_URL, logUrl);
- obj.put(ADFJsonConstants.ADF_STATUS_STATUS, status);
- obj.put(ADFJsonConstants.ADF_STATUS_PROGRESS, progress);
- sendStatusUpdate(sessionID, obj.toString());
- } catch (JSONException | FalconException e) {
- LOG.info("Error when updating job status: " + e.toString());
- }
- }
-
- private void sendErrorMessage(String sessionID, String errorMessage) {
- LOG.info("Sending error message for session " + sessionID + ": " + errorMessage);
- try {
- JSONObject obj = new JSONObject();
- obj.put(ADFJsonConstants.ADF_STATUS_PROTOCOL, ADFJsonConstants.ADF_STATUS_PROTOCOL_NAME);
- obj.put(ADFJsonConstants.ADF_STATUS_JOBID, sessionID);
- obj.put(ADFJsonConstants.ADF_STATUS_STATUS, ADFJsonConstants.ADF_STATUS_FAILED);
- obj.put(ADFJsonConstants.ADF_STATUS_PROGRESS, 0);
- obj.put(ADFJsonConstants.ADF_STATUS_ERROR_TYPE, ADFJsonConstants.ADF_STATUS_ERROR_TYPE_VALUE);
- obj.put(ADFJsonConstants.ADF_STATUS_ERROR_MESSAGE, errorMessage);
- sendStatusUpdate(sessionID, obj.toString());
- } catch (JSONException e) {
- LOG.info("Error when sending error message: " + e.toString());
- }
- }
-
- private void sendStatusUpdate(String sessionID, String message) {
- LOG.info("Sending update for session " + sessionID + ": " + message);
- try {
- InputStream in = IOUtils.toInputStream(message, "UTF-8");
- BrokeredMessage updateMessage = new BrokeredMessage(in);
- updateMessage.setSessionId(sessionID);
- service.sendQueueMessage(statusQueueName, updateMessage);
- } catch (IOException | ServiceException e) {
- LOG.info("Error when sending status update: " + e.toString());
- }
- }
-
- private static class ADFInstanceManager extends AbstractInstanceManager {
- public Instance getFirstInstance(String entityName, String entityType) throws FalconException {
- InstancesResult result = getStatus(entityType, entityName, null, null, null, null, "", "", "", 0, 1, null);
- Instance[] instances = result.getInstances();
- if (instances.length > 0) {
- return instances[0];
- }
- return null;
- }
- }
-}
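For context, every setting ADFProviderService needs is read from Falcon's startup properties under the microsoft.windowsazure.services.servicebus. prefix. A hypothetical startup.properties fragment matching the constants above (all values are placeholders; exact key prefixes may differ depending on how the startup property domains are configured):

    microsoft.windowsazure.services.servicebus.namespace=my-servicebus-namespace
    microsoft.windowsazure.services.servicebus.sasKeyName=RootManageSharedAccessKey
    microsoft.windowsazure.services.servicebus.sasKey=<secret-key>
    microsoft.windowsazure.services.servicebus.serviceBusRootUri=.servicebus.windows.net
    microsoft.windowsazure.services.servicebus.requestqueuename=adfrequest
    microsoft.windowsazure.services.servicebus.statusqueuename=adfstatus
    microsoft.windowsazure.services.servicebus.superuser=falcon
    # polling frequency in seconds; defaults to 10 when unset
    microsoft.windowsazure.services.servicebus.polling.frequency=10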
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFReplicationJob.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFReplicationJob.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFReplicationJob.java
deleted file mode 100644
index f847a82..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFReplicationJob.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import java.net.URISyntaxException;
-
-import org.apache.falcon.adfservice.util.FSUtils;
-import org.apache.falcon.FalconException;
-import org.apache.falcon.entity.v0.EntityType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Azure ADF Replication Job (hive/hdfs to Azure blobs).
- */
-public class ADFReplicationJob extends ADFJob {
-
- private static final Logger LOG = LoggerFactory.getLogger(ADFReplicationJob.class);
-
- public static final String TEMPLATE_REPLICATION_FEED = "replicate-feed.xml";
- public static final String REPLICATION_TARGET_CLUSTER = "adf-replication-target-cluster";
-
- public ADFReplicationJob(String message, String id) throws FalconException {
- super(message, id);
- type = JobType.REPLICATION;
- }
-
- @Override
- public void startJob() throws FalconException {
- try {
- // Note: in the first clickstop, only one input and one output table are supported for the replication job
- String inputTableName = getInputTables().get(0);
- String outputTableName = getOutputTables().get(0);
- String template = FSUtils.readHDFSFile(TEMPLATE_PATH_PREFIX, TEMPLATE_REPLICATION_FEED);
- String message = template.replace("$feedName$", jobEntityName())
- .replace("$frequency$", frequency)
- .replace("$startTime$", startTime)
- .replace("$endTime$", endTime)
- .replace("$clusterSource$", getTableCluster(inputTableName))
- .replace("$clusterTarget$", REPLICATION_TARGET_CLUSTER)
- .replace("$sourceLocation$", getADFTablePath(inputTableName))
- .replace("$targetLocation$", getADFTablePath(outputTableName));
- submitAndScheduleJob(EntityType.FEED.name(), message);
- } catch (URISyntaxException e) {
- LOG.info(e.toString());
- throw new FalconException("Error when reading the replication feed template", e);
- }
-
- }
-
- @Override
- public void cleanup() throws FalconException {
- // Delete the entities. Should be called after the job execution success/failure.
- jobManager.deleteEntity(EntityType.FEED.name(), jobEntityName());
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFScheduledExecutor.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFScheduledExecutor.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFScheduledExecutor.java
deleted file mode 100644
index df5a993..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/ADFScheduledExecutor.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-/**
- * ADF thread pool executor.
- */
-public class ADFScheduledExecutor extends ScheduledThreadPoolExecutor {
-
- private static final Logger LOG = LoggerFactory.getLogger(ADFScheduledExecutor.class);
-
- public ADFScheduledExecutor(int corePoolSize) {
- super(corePoolSize);
- }
-
- @Override
- public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
- return super.scheduleAtFixedRate(wrapRunnable(command), initialDelay, period, unit);
- }
-
- @Override
- public ScheduledFuture scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
- return super.scheduleWithFixedDelay(wrapRunnable(command), initialDelay, delay, unit);
- }
-
- private Runnable wrapRunnable(Runnable command) {
- return new LogOnExceptionRunnable(command);
- }
-
- private static class LogOnExceptionRunnable implements Runnable {
- private Runnable runnable;
-
- public LogOnExceptionRunnable(Runnable runnable) {
- super();
- this.runnable = runnable;
- }
-
- @Override
- public void run() {
- try {
- runnable.run();
- } catch (Throwable t) {
- LOG.info("Error while executing: {}", t.getMessage());
- throw new RuntimeException(t);
- }
- }
- }
-}
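A note on why the wrapper above exists: a plain ScheduledThreadPoolExecutor suppresses any exception thrown by a periodic task — the returned future completes exceptionally, all further runs are cancelled, and nothing is logged. LogOnExceptionRunnable makes such failures visible before rethrowing (the rethrow still cancels subsequent runs). A minimal, illustrative sketch:

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class SwallowedExceptionDemo {
        public static void main(String[] args) {
            // Plain executor: the exception below is never printed and the
            // task never runs again after the first failure.
            ScheduledThreadPoolExecutor plain = new ScheduledThreadPoolExecutor(1);
            plain.scheduleWithFixedDelay(new Runnable() {
                @Override
                public void run() {
                    throw new RuntimeException("boom");   // vanishes silently
                }
            }, 0, 1, TimeUnit.SECONDS);

            // ADFScheduledExecutor: the same failure is logged by
            // LogOnExceptionRunnable before being rethrown.
            ADFScheduledExecutor wrapped = new ADFScheduledExecutor(1);
            wrapped.scheduleWithFixedDelay(new Runnable() {
                @Override
                public void run() {
                    throw new RuntimeException("boom");   // logged, then rethrown
                }
            }, 0, 1, TimeUnit.SECONDS);
        }
    }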
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/DataFeed.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/DataFeed.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/DataFeed.java
deleted file mode 100644
index 32d2757..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/DataFeed.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.falcon.adfservice.util.FSUtils;
-import org.apache.falcon.FalconException;
-
-import java.net.URISyntaxException;
-
-/**
- * Class for data Feed.
- */
-public class DataFeed extends Feed {
- private static final String FEED_TEMPLATE_FILE = "feed.xml";
- private String locationPath;
-
- public DataFeed(final Builder builder) {
- this.feedName = builder.name;
- this.clusterName = builder.feedClusterName;
- this.frequency = builder.feedFrequency;
- this.startTime = builder.feedStartTime;
- this.endTime = builder.feedEndTime;
- this.locationPath = builder.feedLocationPath;
- this.aclOwner = builder.feedAclOwner;
- }
-
- @Override
- public String getEntityxml() throws FalconException {
- try {
- String template = FSUtils.readHDFSFile(ADFJob.TEMPLATE_PATH_PREFIX, FEED_TEMPLATE_FILE);
- return template.replace("$feedName$", feedName)
- .replace("$frequency$", frequency)
- .replace("$startTime$", startTime)
- .replace("$endTime$", endTime)
- .replace("$cluster$", clusterName)
- .replace("$location$", locationPath)
- .replace("$aclowner$", aclOwner);
- } catch (URISyntaxException e) {
- throw new FalconException("Error when generating entity xml for table feed", e);
- }
- }
-
- /**
- * Builder for table Feed.
- */
- public static class Builder {
- private String name;
- private String feedClusterName;
- private String feedFrequency;
- private String feedStartTime;
- private String feedEndTime;
- private String feedLocationPath;
- private String feedAclOwner;
-
- public DataFeed build() {
- return new DataFeed(this);
- }
-
- public Builder withFeedName(final String feedName) {
- this.name = feedName;
- return this;
- }
-
- public Builder withClusterName(final String clusterName) {
- this.feedClusterName = clusterName;
- return this;
- }
-
- public Builder withFrequency(final String frequency) {
- this.feedFrequency = frequency;
- return this;
- }
-
- public Builder withStartTime(final String startTime) {
- this.feedStartTime = startTime;
- return this;
- }
-
- public Builder withEndTime(final String endTime) {
- this.feedEndTime = endTime;
- return this;
- }
-
- public Builder withLocationPath(final String locationPath) {
- this.feedLocationPath = locationPath;
- return this;
- }
-
- public Builder withAclOwner(final String aclOwner) {
- this.feedAclOwner = aclOwner;
- return this;
- }
- }
-}
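A hypothetical usage of the builder above; all values are placeholders, and getEntityxml() resolves the $...$ markers in the feed.xml template read from HDFS:

    DataFeed feed = new DataFeed.Builder()
            .withFeedName("adf-job-pig-input-feed")
            .withClusterName("primaryCluster")
            .withFrequency("days(1)")
            .withStartTime("2016-01-01T00:00Z")
            .withEndTime("2016-12-31T00:00Z")
            .withLocationPath("/apps/falcon/demo/input")
            .withAclOwner("falcon")
            .build();
    String entityXml = feed.getEntityxml();   // may throw FalconException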
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/Feed.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/Feed.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/Feed.java
deleted file mode 100644
index d05f300..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/Feed.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.falcon.FalconException;
-
-/**
- * Abstract class for feed.
- */
-public abstract class Feed {
- protected String feedName;
- protected String clusterName;
- protected String frequency;
- protected String startTime;
- protected String endTime;
- protected String aclOwner;
-
- public String getName() {
- return feedName;
- }
-
- public abstract String getEntityxml() throws FalconException;
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/adf/src/main/java/org/apache/falcon/adfservice/Process.java
----------------------------------------------------------------------
diff --git a/addons/adf/src/main/java/org/apache/falcon/adfservice/Process.java b/addons/adf/src/main/java/org/apache/falcon/adfservice/Process.java
deleted file mode 100644
index 3a65753..0000000
--- a/addons/adf/src/main/java/org/apache/falcon/adfservice/Process.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.adfservice;
-
-import org.apache.falcon.adfservice.util.FSUtils;
-import org.apache.falcon.FalconException;
-
-import java.net.URISyntaxException;
-
-/**
- * Class for process.
- */
-public class Process {
- private static final String PROCESS_TEMPLATE_FILE = "process.xml";
-
- private String entityName;
- private String frequency;
- private String startTime;
- private String endTime;
- private String clusterName;
- private String inputFeedName;
- private String outputFeedName;
- private String engineType;
- private String wfPath;
- private String aclOwner;
-
- public Process(final Builder builder) {
- this.entityName = builder.name;
- this.clusterName = builder.processClusterName;
- this.frequency = builder.processFrequency;
- this.startTime = builder.processStartTime;
- this.endTime = builder.processEndTime;
- this.inputFeedName = builder.processInputFeedName;
- this.outputFeedName = builder.processOutputFeedName;
- this.engineType = builder.processEngineType;
- this.wfPath = builder.processWfPath;
- this.aclOwner = builder.processAclOwner;
- }
-
- public String getName() {
- return entityName;
- }
-
- public String getEntityxml() throws FalconException {
- try {
- String template = FSUtils.readHDFSFile(ADFJob.TEMPLATE_PATH_PREFIX, PROCESS_TEMPLATE_FILE);
- return template.replace("$processName$", entityName)
- .replace("$frequency$", frequency)
- .replace("$startTime$", startTime)
- .replace("$endTime$", endTime)
- .replace("$clusterName$", clusterName)
- .replace("$inputFeedName$", inputFeedName)
- .replace("$outputFeedName$", outputFeedName)
- .replace("$engine$", engineType)
- .replace("$scriptPath$", wfPath)
- .replace("$aclowner$", aclOwner);
- } catch (URISyntaxException e) {
- throw new FalconException("Error when generating process xml", e);
- }
- }
-
- /**
- * Builder for process.
- */
- public static class Builder {
- private String name;
- private String processClusterName;
- private String processFrequency;
- private String processStartTime;
- private String processEndTime;
- private String processInputFeedName;
- private String processOutputFeedName;
- private String processEngineType;
- private String processWfPath;
- private String processAclOwner;
-
- public Process build() {
- return new Process(this);
- }
-
- public Builder withProcessName(final String processName) {
- this.name = processName;
- return this;
- }
-
- public Builder withClusterName(final String clusterName) {
- this.processClusterName = clusterName;
- return this;
- }
-
- public Builder withFrequency(final String frequency) {
- this.processFrequency = frequency;
- return this;
- }
-
- public Builder withStartTime(final String startTime) {
- this.processStartTime = startTime;
- return this;
- }
-
- public Builder withEndTime(final String endTime) {
- this.processEndTime = endTime;
- return this;
- }
-
- public Builder withInputFeedName(final String inputFeedName) {
- this.processInputFeedName = inputFeedName;
- return this;
- }
-
- public Builder withOutputFeedName(final String outputFeedName) {
- this.processOutputFeedName = outputFeedName;
- return this;
- }
-
- public Builder withAclOwner(final String aclOwner) {
- this.processAclOwner = aclOwner;
- return this;
- }
-
- public Builder withEngineType(final String engineType) {
- this.processEngineType = engineType;
- return this;
- }
-
- public Builder withWFPath(final String wfPath) {
- this.processWfPath = wfPath;
- return this;
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalSchema.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalSchema.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalSchema.java
deleted file mode 100644
index f4f44d1..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/schema/RelationalSchema.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.schema;
-
-import org.apache.falcon.designer.storage.Storage;
-import org.apache.falcon.designer.storage.StorageException;
-import org.apache.falcon.designer.storage.Storeable;
-
-import javax.annotation.Nonnull;
-import java.sql.Types;
-
-/**
- * Relational Schema allows data to be represented like a relational
- * table comprising rows and columns. The data type of each column may
- * be primitive or complex, ranging from a simple integer to complex
- * structures such as {@link java.util.Map}, {@link java.util.Set},
- * {@link java.util.List}, Thrift structures, Protobuf messages, etc.
- */
-public class RelationalSchema implements Storeable {
-
- /**
- * Gets the total number of columns present in the
- * underlying data source that conforms to this schema.
- *
- * @return Total Number of columns.
- */
- public int getColumnCount() {
- return -1;
- }
-
- /**
- * Gets the designated alias of a given column number
- * as visible in the underlying data source.
- *
- * @param columnNumber - Can range from 0 to totalColumns - 1
- * @return Alias of the column
- */
- public String getColumnAlias(int columnNumber) {
- return "";
- }
-
- /**
- * Gets the column type of a given column as visible
- * in the underlying data source.
- *
- * @param columnNumber - Can range from 0 to totalColumns - 1
- * @return {@link java.sql.Types}
- */
- public int getColumnType(int columnNumber) {
- return Types.NULL;
- }
-
- @Override
- public void store(@Nonnull Storage storage) throws StorageException {
- //TODO
- }
-
- @Override
- public void restore(@Nonnull Storage storage) throws StorageException {
- //TODO
- }
-
- @Override
- public void delete(@Nonnull Storage storage) throws StorageException {
- //TODO
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/source/DataSource.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/source/DataSource.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/source/DataSource.java
deleted file mode 100644
index 227277c..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/source/DataSource.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.source;
-
-/**
- * Data source describing where the data is located. Generally indicates
- * whether the data is persisted (and if so, where), or whether it is
- * transient or temporary.
- */
-public interface DataSource {
-
- //TODO This is just a placeholder. Behaviors will be added as we expand on the design further.
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storage.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storage.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storage.java
deleted file mode 100644
index 5b63b31..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storage.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.storage;
-
-import javax.annotation.Nonnull;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * This is the storage on which entities can be persisted and restored from.
- *
- * On the storage, entities are organized under namespaces; a namespace/entity
- * combination is unique on the storage.
- */
-public interface Storage {
-
- /**
- * Opens an existing entity under the namespace and provides a Stream view of
- * that data for the consumer.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being opened/read.
- * @return - InputStream
- * @throws StorageException - If such an entity doesn't exist or has issues
- * reading from the storage.
- */
- @Nonnull
- InputStream open(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-
- /**
- * Creates or updates an entity under the namespace and provides a Stream to write out the
- * data. If the entity already exists under the namespace, it is overwritten.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being created/updated.
- * @return - OutputStream
- * @throws StorageException - If it has issues accessing or writing to the storage.
- */
- @Nonnull
- OutputStream create(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-
- /**
- * Deletes an entity under the namespace specified if it exists.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being deleted.
- * @throws StorageException - If entity is missing or if there are issues while performing the
- * delete operation
- */
- void delete(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-}
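A minimal sketch of a round trip through this interface, assuming some Storage implementation (such as the HDFSStorage later in this diff); namespace and entity names are hypothetical:

    void roundTrip(Storage storage, byte[] payload) throws StorageException, IOException {
        // create or overwrite the entity under the namespace
        try (OutputStream out = storage.create("pipelines", "my-entity")) {
            out.write(payload);
        }
        // read the entity back
        try (InputStream in = storage.open("pipelines", "my-entity")) {
            byte[] copy = org.apache.commons.io.IOUtils.toByteArray(in);
        }
        // and remove it again
        storage.delete("pipelines", "my-entity");
    }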
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/StorageException.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/StorageException.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/StorageException.java
deleted file mode 100644
index c8c2f58..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/StorageException.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.storage;
-
-/**
- * Checked Exception that the {@link org.apache.falcon.designer.storage.Storeable}
- * throws when there is an issue with either storing or restoring contents
- * for an object from persistent storage.
- */
-public class StorageException extends Exception {
-
- /**
- * Constructs a default exception with no cause or message.
- */
- public StorageException() {
- super();
- }
-
- /**
- * Constructs an exception with a specific message.
- *
- * @param message - Message on the exception
- */
- public StorageException(String message) {
- super(message);
- }
-
- /**
- * Constructs an exception with a specific message and cause.
- *
- * @param message - Message on the exception
- * @param cause - Underlying exception that resulted in this being thrown
- */
- public StorageException(String message, Throwable cause) {
- super(message, cause);
- }
-
- /**
- * Constructs an exception with a cause; the message is initialized
- * to be the same as that of the cause.
- *
- * @param cause - Underlying exception that resulted in this being thrown
- */
- public StorageException(Throwable cause) {
- super(cause);
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storeable.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storeable.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storeable.java
deleted file mode 100644
index 384d17a..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Storeable.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.storage;
-
-import javax.annotation.Nonnull;
-
-/**
- * All elements in the designer that need to be persisted to permanent
- * storage need to implement this interface.
- */
-public interface Storeable {
-
- /**
- * Store the current object onto the storage being passed.
- *
- * @param storage - Storage onto which the object will be persisted or stored.
- * @throws StorageException - Any exception from the underlying storage.
- */
- void store(@Nonnull Storage storage) throws StorageException;
-
- /**
- * Restore onto the current object contents from the Storage.
- *
- * @param storage - Storage from where the object will be restored from.
- * @throws StorageException - Any exception from the underlying storage.
- */
- void restore(@Nonnull Storage storage) throws StorageException;
-
- /**
- * Deletes the current object from the storage permanently.
- *
- * @param storage - Storage on which the object is stored, that needs to be deleted
- * @throws StorageException - Any exception from the underlying storage.
- */
- void delete(@Nonnull Storage storage) throws StorageException;
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Version.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Version.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Version.java
deleted file mode 100644
index 35c2e86..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/Version.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.storage;
-
-import javax.annotation.Nonnull;
-
-/**
- * Version that represents a specific snapshot of an entity.
- */
-public class Version implements Comparable<Version> {
-
- private final int version;
- private final long timeStamp;
-
- public Version(int version, long timeStamp) {
- this.version = version;
- this.timeStamp = timeStamp;
- }
-
- public int getVersion() {
- return version;
- }
-
- public long getTimeStamp() {
- return timeStamp;
- }
-
- @Override
- public int compareTo(@Nonnull Version that) {
- return Integer.compare(this.version, that.version);
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
-
- Version that = (Version) obj;
- return this.version == that.version;
- }
-
- @Override
- public int hashCode() {
- return version;
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/VersionedStorage.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/VersionedStorage.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/VersionedStorage.java
deleted file mode 100644
index 7f5edc5..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/VersionedStorage.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.storage;
-
-import javax.annotation.Nonnull;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * This is the storage on which entities can be persisted and restored from for a
- * specific version.
- *
- * On the storage, entities are organized under namespaces; a namespace/entity combination
- * is unique on the storage.
- */
-public interface VersionedStorage extends Storage {
-
- /**
- * Opens the latest version of the existing entity under the namespace and
- * provides a Stream view of that data for the consumer.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being opened/read.
- * @return - InputStream
- * @throws StorageException - If such an entity doesn't exist or has issues
- * reading from the storage.
- */
- @Override
- @Nonnull
- InputStream open(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-
- /**
- * Opens the specified version of the existing entity under the namespace and
- * provides a Stream view of that data for the consumer.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being opened/read.
- * @param version - Version of the entity that needs to be opened.
- * @return - InputStream
- * @throws StorageException - If such an entity/version doesn't exist or has issues
- * reading from the storage.
- */
- @Nonnull
- InputStream open(@Nonnull String namespace, @Nonnull String entity,
- @Nonnull Version version) throws StorageException;
-
- /**
- * Creates or updates an entity under the namespace and provides a Stream to write out the
- * data. If the entity already exists under the namespace, a new version of it is created.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being created/updated.
- * @return - OutputStream
- * @throws StorageException - If it has issues accessing or writing to the storage.
- */
-
- @Override
- @Nonnull
- OutputStream create(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-
- /**
- * Deletes the latest version of an entity under the namespace specified if it exists.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being deleted.
- * @throws StorageException - If entity is missing or if there are issues while performing the
- * delete operation
- */
- @Override
- void delete(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-
- /**
- * Deletes the specified version of an entity under the namespace if it exists.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is being deleted.
- * @param version - Version that is to be deleted.
- * @throws StorageException - If entity/version is missing or if there are issues while performing the
- * delete operation
- */
- void delete(@Nonnull String namespace, @Nonnull String entity,
- @Nonnull Version version) throws StorageException;
-
- /**
- * Retrieves an iterator over versions of the entity under the namespace specified.
- *
- * @param namespace - Namespace under which the entity is stored.
- * @param entity - Entity that is stored, for which versions are sought.
- * @return - Iterable {@link org.apache.falcon.designer.storage.Version}
- * @throws StorageException - If entity version is missing or if there are issues while retrieving
- * the versions on the entity.
- */
- @Nonnull
- Iterable<Version> versions(@Nonnull String namespace, @Nonnull String entity) throws StorageException;
-}
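
For orientation, a consumer of the VersionedStorage contract above writes an entity
through create() and reads back either the latest or a specific version. The sketch
below is illustrative only: the namespace and entity names are made up, and `storage`
is assumed to be any concrete implementation of the interface.

    import org.apache.falcon.designer.storage.StorageException;
    import org.apache.falcon.designer.storage.Version;
    import org.apache.falcon.designer.storage.VersionedStorage;

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    /** Illustrative client of the VersionedStorage contract; names are hypothetical. */
    final class VersionedStorageExample {
        static void roundTrip(VersionedStorage storage) throws StorageException, IOException {
            // Writing creates a new version if "daily-flow" already exists under "pipelines".
            OutputStream out = storage.create("pipelines", "daily-flow");
            try {
                out.write("flow-definition".getBytes("UTF-8"));
            } finally {
                out.close();
            }
            // Open the latest version, then walk every recorded version.
            InputStream latest = storage.open("pipelines", "daily-flow");
            latest.close();
            for (Version v : storage.versions("pipelines", "daily-flow")) {
                storage.open("pipelines", "daily-flow", v).close();
            }
        }
    }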
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/impl/HDFSStorage.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/impl/HDFSStorage.java b/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/impl/HDFSStorage.java
deleted file mode 100644
index 4d0ff64..0000000
--- a/addons/designer/core/src/main/java/org/apache/falcon/designer/storage/impl/HDFSStorage.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.storage.impl;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.falcon.designer.storage.Storage;
-import org.apache.falcon.designer.storage.StorageException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-/**
- * Storage implementation to store in HDFS.
- *
- */
-public class HDFSStorage implements Storage {
-
- private FileSystem fs;
- private String basePath;
- private static final String SEPARATOR = "/";
- private static final String BASEPATH_CONFIG_NAME =
- "falcon.designer.hdfsstorage.defaultpath";
-
- HDFSStorage(Configuration conf) throws StorageException {
- try {
- this.fs = FileSystem.get(conf);
- } catch (IOException e) {
- throw new StorageException(e);
- }
- this.basePath = conf.get(BASEPATH_CONFIG_NAME);
- if (this.basePath == null || this.basePath.isEmpty()) {
- throw new StorageException(BASEPATH_CONFIG_NAME
- + " cannot be empty");
- }
-
- }
-
- @Override
- public InputStream open(String namespace, String entity)
- throws StorageException {
- try {
- return fs.open(new Path(basePath + SEPARATOR + namespace
- + SEPARATOR + entity));
-
- } catch (IllegalArgumentException e) {
- throw new StorageException(e);
- } catch (IOException e) {
- throw new StorageException(e);
- }
-
- }
-
- @Override
- public OutputStream create(String namespace, String entity)
- throws StorageException {
- try {
- return fs.create(new Path(basePath + SEPARATOR + namespace
- + SEPARATOR + entity));
- } catch (IllegalArgumentException e) {
- throw new StorageException(e);
- } catch (IOException e) {
- throw new StorageException(e);
- }
-
- }
-
- @Override
- public void delete(String namespace, String entity) throws StorageException {
- try {
- fs.delete(new Path(basePath + SEPARATOR + namespace + SEPARATOR
- + entity), true);
- } catch (IllegalArgumentException e) {
- throw new StorageException(e);
- } catch (IOException e) {
- throw new StorageException(e);
- }
-
- }
-
-}
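
Wiring this class up is driven entirely by Hadoop Configuration. The sketch below
shows the minimal setup, mirroring the test that follows; it is illustrative only
(the base path and entity names are placeholders, and the class must live in the
same package because the constructor is package-private). A real deployment would
point fs.default.name at an HDFS namenode instead of the local file system.

    package org.apache.falcon.designer.storage.impl;

    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.falcon.designer.storage.StorageException;
    import org.apache.hadoop.conf.Configuration;

    final class HDFSStorageExample {
        static void writeEntity() throws StorageException, IOException {
            Configuration conf = new Configuration();
            // Base directory under which <namespace>/<entity> files are laid out.
            conf.set("falcon.designer.hdfsstorage.defaultpath", "/tmp/designer");
            // Local FS for illustration; a real setup would use an hdfs:// URI.
            conf.set("fs.default.name", "file:///");
            HDFSStorage storage = new HDFSStorage(conf);
            OutputStream out = storage.create("pipelines", "daily-flow");
            try {
                out.write("flow-definition".getBytes("UTF-8"));
            } finally {
                out.close();
            }
        }
    }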
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/core/src/test/java/org/apache/falcon/designer/storage/impl/HDFSStorageTest.java
----------------------------------------------------------------------
diff --git a/addons/designer/core/src/test/java/org/apache/falcon/designer/storage/impl/HDFSStorageTest.java b/addons/designer/core/src/test/java/org/apache/falcon/designer/storage/impl/HDFSStorageTest.java
deleted file mode 100644
index 187a7d2..0000000
--- a/addons/designer/core/src/test/java/org/apache/falcon/designer/storage/impl/HDFSStorageTest.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.designer.storage.impl;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import org.apache.falcon.designer.storage.StorageException;
-import org.apache.hadoop.conf.Configuration;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-/**
- * Tests HDFSStorage for create, open and delete.
- */
-public class HDFSStorageTest {
- private HDFSStorage hdfsStorageInst;
-
- @BeforeClass
- public void setUpDFS() throws Exception {
- Configuration conf = new Configuration();
- conf.set("falcon.designer.hdfsstorage.defaultpath", "/tmp/");
- conf.set("fs.default.name", "file:///");
- hdfsStorageInst = new HDFSStorage(conf);
-
- }
-
- @Test
- public void testCreateOpenDelete() {
- try {
- final String testNameSpace = "testNS";
- final String testEntity = "testEntity";
- OutputStream opStream =
- hdfsStorageInst.create(testNameSpace, testEntity);
- String testMessage = "testing HDFSStorage";
- byte[] outputByte = new byte[testMessage.length()];
- opStream.write(testMessage.getBytes());
- opStream.close();
- InputStream ipStream =
- hdfsStorageInst.open(testNameSpace, testEntity);
- ipStream.read(outputByte, 0, testMessage.length());
- ipStream.close();
- hdfsStorageInst.delete(testNameSpace, testEntity);
- try {
- hdfsStorageInst.open(testNameSpace, testEntity);
- Assert
- .fail("file should be absent after delete; open should have thrown an exception");
- } catch (StorageException ex) {
- Assert.assertEquals(ex.getCause().getClass(),
- FileNotFoundException.class);
- }
- Assert.assertEquals(new String(outputByte), testMessage);
- } catch (StorageException ex) {
- Assert.fail(ex.getMessage());
- } catch (IOException ex) {
- Assert.fail(ex.getMessage());
- }
-
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/flows/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/flows/pom.xml b/addons/designer/flows/pom.xml
deleted file mode 100644
index 07cb35d..0000000
--- a/addons/designer/flows/pom.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- </parent>
- <artifactId>designer-flow</artifactId>
- <description>Apache Falcon Pipeline Designer - Flows Module</description>
- <name>Apache Falcon Designer Flows</name>
- <packaging>jar</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-core</artifactId>
- </dependency>
- </dependencies>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/flows/src/main/java/org/apache/falcon/designer/primitive/Flow.java
----------------------------------------------------------------------
diff --git a/addons/designer/flows/src/main/java/org/apache/falcon/designer/primitive/Flow.java b/addons/designer/flows/src/main/java/org/apache/falcon/designer/primitive/Flow.java
deleted file mode 100644
index f7d227c..0000000
--- a/addons/designer/flows/src/main/java/org/apache/falcon/designer/primitive/Flow.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.designer.primitive;
-
-import org.apache.falcon.designer.configuration.FlowConfig;
-
-/**
- * Concrete implementation for a Flow.
- */
-public class Flow extends Primitive<Flow, FlowConfig> {
-
- private FlowConfig process;
- private String nameSpace;
- private String entity;
-
- public Flow(FlowConfig process, String nameSpace, String entity) {
- this.process = process;
- this.nameSpace = nameSpace;
- this.entity = entity;
- }
-
- @Override
- protected Flow copy() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Iterable<Message> validate() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- protected Code doCompile() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- protected Flow doOptimize() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public String getNamespace() {
- return nameSpace;
- }
-
- @Override
- public String getEntity() {
- return entity;
- }
-
-
- @Override
- public void setConfiguration(FlowConfig config) {
- this.process = config;
- }
-
- @Override
- public FlowConfig getConfiguration() {
- return process;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/pom.xml b/addons/designer/pom.xml
deleted file mode 100644
index 4be24c3..0000000
--- a/addons/designer/pom.xml
+++ /dev/null
@@ -1,709 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- <description>Apache Falcon Pipeline Designer</description>
- <name>Apache Falcon Pipeline Designer</name>
- <packaging>pom</packaging>
- <url>http://falcon.apache.org/</url>
-
- <licenses>
- <license>
- <name>The Apache Software License, Version 2.0</name>
- <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
- </license>
- </licenses>
-
- <organization>
- <name>Apache Software Foundation</name>
- <url>http://www.apache.org</url>
- </organization>
-
- <issueManagement>
- <system>JIRA</system>
- <url>https://issues.apache.org/jira/browse/FALCON</url>
- </issueManagement>
-
- <ciManagement>
- <system>Jenkins</system>
- <url>https://builds.apache.org/job/falcon</url>
- </ciManagement>
-
- <inceptionYear>2013</inceptionYear>
-
- <mailingLists>
- <mailingList>
- <name>falcon-user</name>
- <subscribe>user-subscribe@falcon.apache.org</subscribe>
- <unsubscribe>user-unsubscribe@falcon.apache.org</unsubscribe>
- <post>user@falcon.apache.org</post>
- <archive>http://mail-archives.apache.org/mod_mbox/falcon-user/</archive>
- </mailingList>
- <mailingList>
- <name>falcon-dev</name>
- <subscribe>dev-subscribe@falcon.apache.org</subscribe>
- <unsubscribe>dev-unsubscribe@falcon.apache.org</unsubscribe>
- <post>dev@falcon.apache.org</post>
- <archive>http://mail-archives.apache.org/mod_mbox/falcon-dev/</archive>
- </mailingList>
- <mailingList>
- <name>falcon-commits</name>
- <subscribe>commits-subscribe@falcon.apache.org</subscribe>
- <unsubscribe>commits-unsubscribe@falcon.apache.org</unsubscribe>
- <post>commits@falcon.apache.org</post>
- <archive>http://mail-archives.apache.org/mod_mbox/falcon-commits/</archive>
- </mailingList>
- </mailingLists>
-
- <scm>
- <connection>scm:git:https://git-wip-us.apache.org/repos/asf/falcon.git</connection>
- <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/falcon.git</developerConnection>
- <url>https://git-wip-us.apache.org/repos/asf/falcon.git</url>
- </scm>
-
- <distributionManagement>
- <repository>
- <id>internal.repo</id>
- <name>Internal Repository</name>
- <url>${internal.maven.repo}</url>
- </repository>
- </distributionManagement>
-
- <properties>
- <!-- platform encoding override -->
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-
- <include.prism>true</include.prism>
-
- <slf4j.version>1.7.5</slf4j.version>
- <hive.version>0.13.1</hive.version>
- <jetty.version>6.1.26</jetty.version>
- <internal.maven.repo>file:///tmp/falcontemprepo</internal.maven.repo>
- <skipCheck>false</skipCheck>
- <hadoop.version>2.6.2</hadoop.version>
- </properties>
-
- <modules>
- <module>checkstyle</module>
- <module>ui</module>
- <module>common</module>
- <module>core</module>
- <module>flows</module>
- <module>transforms</module>
- <module>actions</module>
- </modules>
-
- <repositories>
- <repository>
- <id>central</id>
- <url>http://repo1.maven.org/maven2</url>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
- <repository>
- <id>hortonworks.repo</id>
- <url>http://repo.hortonworks.com/content/repositories/releases</url>
- <name>Hortonworks Repo</name>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
- <repository>
- <id>Codehaus repository</id>
- <url>http://repository.codehaus.org/</url>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
- <repository>
- <id>apache.snapshots.repo</id>
- <url>https://repository.apache.org/content/groups/snapshots</url>
- <name>Apache Snapshots Repository</name>
- <snapshots>
- <enabled>true</enabled>
- </snapshots>
- </repository>
- <repository>
- <id>default</id>
- <url>https://repository.apache.org/content/groups/public/</url>
- </repository>
- <repository>
- <id>java.net-Public</id>
- <name>Maven Java Net Snapshots and Releases</name>
- <url>https://maven.java.net/content/groups/public/</url>
- </repository>
- <repository>
- <id>repository.jboss.org-public</id>
- <name>JBoss repository</name>
- <url>https://repository.jboss.org/nexus/content/groups/public</url>
- </repository>
- <repository>
- <id>cdh.repo</id>
- <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
- <name>Cloudera Repository</name>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
- </repositories>
-
- <dependencyManagement>
- <dependencies>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-log4j12</artifactId>
- <version>${slf4j.version}</version>
- </dependency>
-
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <version>2.6</version>
- </dependency>
-
- <dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <version>1.2.17</version>
- <scope>compile</scope>
- <exclusions>
- <exclusion>
- <groupId>com.sun.jdmk</groupId>
- <artifactId>jmxtools</artifactId>
- </exclusion>
- <exclusion>
- <groupId>com.sun.jmx</groupId>
- <artifactId>jmxri</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.mail</groupId>
- <artifactId>mail</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.jms</groupId>
- <artifactId>jmx</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.jms</groupId>
- <artifactId>jms</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.easymock</groupId>
- <artifactId>easymock</artifactId>
- <version>2.4</version>
- <scope>test</scope>
- </dependency>
-
- <dependency>
- <groupId>org.codehaus.jackson</groupId>
- <artifactId>jackson-core-asl</artifactId>
- <version>1.5.2</version>
- </dependency>
-
- <dependency>
- <groupId>org.codehaus.jackson</groupId>
- <artifactId>jackson-mapper-asl</artifactId>
- <version>1.5.2</version>
- </dependency>
-
- <dependency>
- <groupId>commons-cli</groupId>
- <artifactId>commons-cli</artifactId>
- <version>1.2</version>
- </dependency>
-
- <dependency>
- <groupId>commons-codec</groupId>
- <artifactId>commons-codec</artifactId>
- <version>1.8</version>
- </dependency>
-
- <dependency>
- <groupId>commons-io</groupId>
- <artifactId>commons-io</artifactId>
- <version>2.2</version>
- </dependency>
-
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-client</artifactId>
- <version>1.8</version>
- </dependency>
-
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-json</artifactId>
- <version>1.8</version>
- </dependency>
-
- <dependency>
- <groupId>commons-beanutils</groupId>
- <artifactId>commons-beanutils</artifactId>
- <version>1.8.3</version>
- </dependency>
-
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-client</artifactId>
- <version>${project.version}</version>
- </dependency>
-
- <dependency>
- <groupId>javax.xml.bind</groupId>
- <artifactId>jaxb-api</artifactId>
- <version>2.1</version>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- <version>6.1.1</version>
- <scope>test</scope>
- </dependency>
-
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- <version>${slf4j.version}</version>
- </dependency>
-
- <dependency>
- <groupId>org.codehaus.jettison</groupId>
- <artifactId>jettison</artifactId>
- <version>1.3</version>
- </dependency>
-
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- <version>1.8</version>
- </dependency>
-
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <version>1.8.5</version>
- <scope>provided</scope>
- </dependency>
-
- <dependency>
- <groupId>org.aspectj</groupId>
- <artifactId>aspectjrt</artifactId>
- <version>1.6.11</version>
- </dependency>
-
- <dependency>
- <groupId>org.aspectj</groupId>
- <artifactId>aspectjweaver</artifactId>
- <version>1.6.11</version>
- </dependency>
-
- <dependency>
- <groupId>com.googlecode.json-simple</groupId>
- <artifactId>json-simple</artifactId>
- <version>1.1</version>
- </dependency>
-
- <dependency>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>jetty</artifactId>
- <version>${jetty.version}</version>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>jetty-plus</artifactId>
- <version>${jetty.version}</version>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>commons-el</groupId>
- <artifactId>commons-el</artifactId>
- <version>1.0</version>
- </dependency>
-
- <dependency>
- <groupId>javax.servlet.jsp</groupId>
- <artifactId>jsp-api</artifactId>
- <version>2.0</version>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
- <version>${hive.version}</version>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hive.hcatalog</groupId>
- <artifactId>hive-webhcat-java-client</artifactId>
- <version>${hcatalog.version}</version>
- </dependency>
-
- <dependency>
- <groupId>net.sourceforge.findbugs</groupId>
- <artifactId>jsr305</artifactId>
- <version>1.3.2</version>
- </dependency>
- <dependency>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-core</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <version>${hadoop.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- <version>${hadoop.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- <version>${hadoop.version}</version>
- <scope>test</scope>
- <classifier>tests</classifier>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <version>${hadoop.version}</version>
- <scope>test</scope>
- <classifier>tests</classifier>
- </dependency>
- </dependencies>
- </dependencyManagement>
-
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>buildnumber-maven-plugin</artifactId>
- <version>1.0</version>
- </plugin>
-
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- <version>1.5</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <version>2.3.2</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-source-plugin</artifactId>
- <version>2.2.1</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-javadoc-plugin</artifactId>
- <version>2.8.1</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <version>2.14</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-deploy-plugin</artifactId>
- <version>2.7</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-war-plugin</artifactId>
- <version>2.1.1</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.rat</groupId>
- <artifactId>apache-rat-plugin</artifactId>
- <version>0.7</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- <version>2.9.1</version>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-site-plugin</artifactId>
- <version>3.2</version>
- </plugin>
-
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>findbugs-maven-plugin</artifactId>
- <version>2.5.2</version>
- </plugin>
- <!-- Source code metrics: mvn javancss:report or mvn site -->
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>javancss-maven-plugin</artifactId>
- <version>2.0</version>
- </plugin>
- </plugins>
- </pluginManagement>
-
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>buildnumber-maven-plugin</artifactId>
- <executions>
- <execution>
- <phase>validate</phase>
- <goals>
- <goal>create</goal>
- </goals>
- </execution>
- </executions>
- <configuration>
- <revisionOnScmFailure>release</revisionOnScmFailure>
- <doCheck>false</doCheck>
- <doUpdate>false</doUpdate>
- </configuration>
- </plugin>
-
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <configuration>
- <source>1.7</source>
- <target>1.7</target>
- </configuration>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-source-plugin</artifactId>
- <executions>
- <execution>
- <id>attach-sources</id>
- <phase>package</phase>
- <goals>
- <goal>jar-no-fork</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-javadoc-plugin</artifactId>
- <executions>
- <execution>
- <id>attach-javadocs</id>
- <phase>package</phase>
- <goals>
- <goal>javadoc</goal>
- <goal>jar</goal>
- </goals>
- <configuration>
- <skip>${skipCheck}</skip>
- </configuration>
- </execution>
- </executions>
- <configuration>
- <skip>${skipCheck}</skip>
- </configuration>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <configuration>
- <redirectTestOutputToFile>true</redirectTestOutputToFile>
- <forkMode>always</forkMode>
- <argLine>-Djava.awt.headless=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc=</argLine>
- </configuration>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-deploy-plugin</artifactId>
- <executions>
- <execution>
- <id>deploy</id>
- <phase>deploy</phase>
- <goals>
- <goal>deploy</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
-
- <plugin>
- <groupId>org.apache.rat</groupId>
- <artifactId>apache-rat-plugin</artifactId>
- <configuration>
- <useDefaultExcludes>true</useDefaultExcludes>
- <useMavenDefaultExcludes>true</useMavenDefaultExcludes>
- <useIdeaDefaultExcludes>true</useIdeaDefaultExcludes>
- <useEclipseDefaultExcludes>true</useEclipseDefaultExcludes>
- <excludeSubProjects>true</excludeSubProjects>
- <excludes>
- <exclude>**/*.txt</exclude>
- <exclude>**/.git</exclude>
- <exclude>.idea/**</exclude>
- <exclude>**/*.twiki</exclude>
- <exclude>**/*.iml</exclude>
- <exclude>**/target</exclude>
- <exclude>**/*.patch</exclude>
- <exclude>**/*.log</exclude>
- <exclude>**/logs</exclude>
- <exclude>**/.classpath</exclude>
- <exclude>**/.project</exclude>
- <exclude>**/.settings</exclude>
- <exclude>**/maven-eclipse.xml</exclude>
- <exclude>**/.externalToolBuilders</exclude>
- <exclude>build/**</exclude>
- <exclude>test-output/**</exclude>
- </excludes>
- </configuration>
- <executions>
- <execution>
- <id>rat-check</id>
- <goals>
- <goal>check</goal>
- </goals>
- <phase>verify</phase>
- <configuration>
- <failOnViolation>false</failOnViolation>
- </configuration>
- </execution>
- </executions>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>checkstyle</artifactId>
- <version>${project.version}</version>
- </dependency>
- </dependencies>
- <executions>
- <execution>
- <id>checkstyle-check</id>
- <goals>
- <goal>check</goal>
- </goals>
- <phase>verify</phase>
- <configuration>
- <consoleOutput>true</consoleOutput>
- <includeTestSourceDirectory>true</includeTestSourceDirectory>
- <configLocation>falcon/checkstyle.xml</configLocation>
- <failOnViolation>true</failOnViolation>
- <skip>${skipCheck}</skip>
- </configuration>
- </execution>
- </executions>
- </plugin>
-
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>findbugs-maven-plugin</artifactId>
- <configuration>
- <!--debug>true</debug -->
- <xmlOutput>true</xmlOutput>
- <excludeFilterFile>${basedir}/../checkstyle/src/main/resources/falcon/findbugs-exclude.xml</excludeFilterFile>
- <failOnError>true</failOnError>
- <skip>${skipCheck}</skip>
- </configuration>
- <executions>
- <execution>
- <id>findbugs-check</id>
- <goals>
- <goal>check</goal>
- </goals>
- <phase>verify</phase>
- </execution>
- </executions>
- </plugin>
- <!-- Source code metrics: mvn javancss:report or mvn site -->
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>javancss-maven-plugin</artifactId>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-site-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.apache.maven.doxia</groupId>
- <artifactId>doxia-module-twiki</artifactId>
- <version>1.3</version>
- </dependency>
- </dependencies>
- <executions>
- <execution>
- <goals>
- <goal>site</goal>
- </goals>
- <phase>prepare-package</phase>
- </execution>
- </executions>
- <configuration>
- <skip>true</skip>
- </configuration>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-release-plugin</artifactId>
- <version>2.4.1</version>
- </plugin>
- </plugins>
- </build>
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/transforms/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/transforms/pom.xml b/addons/designer/transforms/pom.xml
deleted file mode 100644
index 3c4fd1e..0000000
--- a/addons/designer/transforms/pom.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- </parent>
- <artifactId>designer-transform</artifactId>
- <description>Apache Falcon Pipeline Designer - Transform Module</description>
- <name>Apache Falcon Designer Transform</name>
- <packaging>jar</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
- </dependencies>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/ui/pom.xml
----------------------------------------------------------------------
diff --git a/addons/designer/ui/pom.xml b/addons/designer/ui/pom.xml
deleted file mode 100644
index 50e134a..0000000
--- a/addons/designer/ui/pom.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.falcon.designer</groupId>
- <artifactId>designer-main</artifactId>
- <version>0.6-SNAPSHOT</version>
- <relativePath>../pom.xml</relativePath>
- </parent>
- <artifactId>designer-ui</artifactId>
- <description>Apache Falcon Pipeline Designer UI</description>
- <name>Apache Falcon Pipeline Designer UI</name>
- <packaging>war</packaging>
-
- <dependencies>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-client</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-json</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.googlecode.json-simple</groupId>
- <artifactId>json-simple</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>jetty</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>jetty-plus</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.aspectj</groupId>
- <artifactId>aspectjrt</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.aspectj</groupId>
- <artifactId>aspectjweaver</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <artifactId>maven-war-plugin</artifactId>
- <version>2.4</version>
- </plugin>
- </plugins>
- </build>
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/designer/ui/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/addons/designer/ui/src/main/webapp/WEB-INF/web.xml b/addons/designer/ui/src/main/webapp/WEB-INF/web.xml
deleted file mode 100644
index 86d6f10..0000000
--- a/addons/designer/ui/src/main/webapp/WEB-INF/web.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<!DOCTYPE web-app PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
- "http://java.sun.com/dtd/web-app_2_3.dtd">
-
-<web-app>
-
- <display-name>Apache Falcon Pipeline Designer</display-name>
- <description>Apache Falcon Pipeline Designer</description>
-
- <servlet>
- <servlet-name>pipelineApi</servlet-name>
- <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
- <init-param>
- <param-name>com.sun.jersey.config.property.resourceConfigClass</param-name>
- <param-value>com.sun.jersey.api.core.PackagesResourceConfig</param-value>
- </init-param>
- <init-param>
- <param-name>com.sun.jersey.config.property.packages</param-name>
- <param-value>
- org.apache.falcon.designer.resource
- </param-value>
- </init-param>
- <load-on-startup>1</load-on-startup>
- </servlet>
-
- <servlet-mapping>
- <servlet-name>pipelineApi</servlet-name>
- <url-pattern>/api/*</url-pattern>
- </servlet-mapping>
-
-</web-app>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/README
----------------------------------------------------------------------
diff --git a/addons/hivedr/README b/addons/hivedr/README
deleted file mode 100644
index 0b448d3..0000000
--- a/addons/hivedr/README
+++ /dev/null
@@ -1,80 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Hive Disaster Recovery
-=======================
-
-Overview
----------
-
-Falcon provides a feature to replicate Hive metadata and data events from one Hadoop cluster
-to another. This is supported for both secure and unsecure clusters through Falcon Recipes.
-
-
-Prerequisites
--------------
-
-The following are the prerequisites for using Hive DR:
-
-* Hive 1.2.0+
-* Oozie 4.2.0+
-
-*Note:* Set the following properties in hive-site.xml for replicating the Hive events:
- <property>
- <name>hive.metastore.event.listeners</name>
- <value>org.apache.hive.hcatalog.listener.DbNotificationListener</value>
- <description>event listeners that are notified of any metastore changes</description>
- </property>
-
- <property>
- <name>hive.metastore.dml.events</name>
- <value>true</value>
- </property>
-
-
-Usage
-------
-a. Perform initial bootstrap of Table and Database from one Hadoop cluster to another Hadoop cluster
-
- Table Bootstrap
- ----------------
- For bootstrapping table replication, after having turned on the DbNotificationListener
- on the source DB, do an EXPORT of the table, distcp the export over to the destination
- warehouse, and do an IMPORT there (a command sketch follows this README). Refer to the
- Hive Export/Import documentation for syntax details and examples.
-
- This will set up the destination table so that the events on the source cluster that modify the table
- will then be replicated over.
-
- Database Bootstrap
- ------------------
- For bootstrapping DB replication, the destination DB should first be created. This step is expected,
- since DB replication definitions can be set up by users only on pre-existing DBs. Second, export all
- tables in the source DB and import them in the destination DB, as described above.
-
-
-b. Setup cluster definition
- $FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml
-
-c. Submit Hive DR recipe
- $FALCON_HOME/bin/falcon recipe -name hive-disaster-recovery -operation HIVE_DISASTER_RECOVERY
-
-
-Recipe templates for Hive DR are available in addons/recipes/hive-disaster-recovery; copy them to
-the recipe path specified in client.properties.
-
-*Note:* If Kerberos security is enabled on the cluster, use the secure templates for Hive DR from
- addons/recipes/hive-disaster-recovery
\ No newline at end of file
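
As a concrete illustration of the table bootstrap described in the README above, the
sequence below sketches the EXPORT, distcp and IMPORT steps. All host names, ports,
database/table names and staging paths are placeholders, not values taken from the recipe:

    # On the source cluster: export the table to a staging directory.
    beeline -u jdbc:hive2://source-host:10000 -e "EXPORT TABLE default.test TO '/apps/hive/staging/test_export';"

    # Copy the export to the destination cluster's staging area.
    hadoop distcp hdfs://source-nn:8020/apps/hive/staging/test_export \
        hdfs://target-nn:8020/apps/hive/staging/test_export

    # On the destination cluster: import, creating the table that later events keep in sync.
    beeline -u jdbc:hive2://target-host:10000 -e "IMPORT TABLE default.test FROM '/apps/hive/staging/test_export';"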
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/pom.xml
----------------------------------------------------------------------
diff --git a/addons/hivedr/pom.xml b/addons/hivedr/pom.xml
deleted file mode 100644
index 37dc5c9..0000000
--- a/addons/hivedr/pom.xml
+++ /dev/null
@@ -1,209 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-main</artifactId>
- <version>0.10-SNAPSHOT</version>
- <relativePath>../../pom.xml</relativePath>
- </parent>
- <artifactId>falcon-hive-replication</artifactId>
- <description>Apache Falcon Hive Replication Module</description>
- <name>Apache Falcon Hive Replication</name>
- <packaging>jar</packaging>
-
- <dependencies>
- <!-- dependencies are always listed in sorted order by groupId, artifactId -->
- <!-- intra-project -->
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-common</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-exec</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-shims</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive.hcatalog</groupId>
- <artifactId>hive-webhcat-java-client</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-jdbc</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <exclusion>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpclient</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- </dependency>
- <!-- inter-project -->
- <dependency>
- <groupId>javax.jms</groupId>
- <artifactId>jms</artifactId>
- </dependency>
- <!-- test intra-project -->
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-cli</artifactId>
- <version>${hive.version}</version>
- <scope>test</scope>
- </dependency>
- <!-- test inter-project -->
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>3.8.1</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-test-util</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-hadoop-dependencies</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.falcon</groupId>
- <artifactId>falcon-metrics</artifactId>
- </dependency>
- </dependencies>
-
- <profiles>
- <profile>
- <id>hadoop-2</id>
- <activation>
- <activeByDefault>true</activeByDefault>
- </activation>
- <dependencies>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-core</artifactId>
- <scope>compile</scope>
- <version>${hadoop.version}</version>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-client</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-common</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-server-nodemanager</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-auth</artifactId>
- <scope>compile</scope>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-distcp</artifactId>
- <scope>compile</scope>
- </dependency>
- </dependencies>
- </profile>
- </profiles>
-
- <build>
- <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
- <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-jar-plugin</artifactId>
- <executions>
- <execution>
- <goals>
- <goal>test-jar</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <configuration>
- <systemProperties>
- <property>
- <name>derby.stream.error.file</name>
- <value>target/derby.log</value>
- </property>
- </systemProperties>
- </configuration>
- </plugin>
- </plugins>
- </build>
-
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java
deleted file mode 100644
index bfeca8d..0000000
--- a/addons/hivedr/src/test/java/org/apache/falcon/hive/DBReplicationStatusTest.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.hive;
-
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.falcon.hive.util.DBReplicationStatus;
-import org.apache.falcon.hive.util.ReplicationStatus;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Unit tests for DBReplicationStatus.
- */
-@Test
-public class DBReplicationStatusTest {
-
- private Map<String, ReplicationStatus> tableStatuses = new HashMap<String, ReplicationStatus>();
- private ReplicationStatus dbReplicationStatus;
- private ReplicationStatus tableStatus1;
-
- public DBReplicationStatusTest() {
- }
-
- @BeforeClass
- public void prepare() throws Exception {
- dbReplicationStatus = new ReplicationStatus("source", "target", "jobname",
- "Default1", null, ReplicationStatus.Status.FAILURE, 20L);
- tableStatus1 = new ReplicationStatus("source", "target", "jobname",
- "default1", "Table1", ReplicationStatus.Status.SUCCESS, 20L);
- tableStatuses.put("Table1", tableStatus1);
-
- }
-
- public void dBReplicationStatusSerializeTest() throws Exception {
- DBReplicationStatus replicationStatus = new DBReplicationStatus(dbReplicationStatus, tableStatuses);
-
- String expected = "{\n" + " \"db_status\": {\n"
- + " \"sourceUri\": \"source\",\n" + " \"targetUri\": \"target\",\n"
- + " \"jobName\": \"jobname\",\n" + " \"database\": \"default1\",\n"
- + " \"status\": \"FAILURE\",\n" + " \"eventId\": 20\n" + " },\n"
- + " \"table_status\": {\"table1\": {\n" + " \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n" + " \"jobName\": \"jobname\",\n"
- + " \"database\": \"default1\",\n" + " \"table\": \"table1\",\n"
- + " \"status\": \"SUCCESS\",\n" + " \"eventId\": 20\n" + " }}\n" + "}";
- String actual = replicationStatus.toJsonString();
- Assert.assertEquals(actual, expected);
- }
-
- public void dBReplicationStatusDeserializeTest() throws Exception {
-
- String jsonString = "{\"db_status\":{\"sourceUri\":\"source\","
- + "\"targetUri\":\"target\",\"jobName\":\"jobname\",\"database\":\"default1\",\"status\":\"SUCCESS\","
- + "\"eventId\":20},\"table_status\":{\"Table1\":{\"sourceUri\":\"source\",\"targetUri\":\"target\","
- + "\"jobName\":\"jobname\",\"database\":\"default1\",\"table\":\"Table1\",\"status\":\"SUCCESS\","
- + "\"eventId\":20},\"table3\":{\"sourceUri\":\"source\",\"targetUri\":\"target\","
- + "\"jobName\":\"jobname\", \"database\":\"Default1\",\"table\":\"table3\",\"status\":\"FAILURE\","
- + "\"eventId\":10}, \"table2\":{\"sourceUri\":\"source\",\"targetUri\":\"target\","
- + "\"jobName\":\"jobname\", \"database\":\"default1\",\"table\":\"table2\",\"status\":\"INIT\"}}}";
-
- DBReplicationStatus dbStatus = new DBReplicationStatus(jsonString);
- Assert.assertEquals(dbStatus.getDatabaseStatus().getDatabase(), "default1");
- Assert.assertEquals(dbStatus.getDatabaseStatus().getJobName(), "jobname");
- Assert.assertEquals(dbStatus.getDatabaseStatus().getEventId(), 20);
-
- Assert.assertEquals(dbStatus.getTableStatuses().get("table1").getEventId(), 20);
- Assert.assertEquals(dbStatus.getTableStatuses().get("table1").getStatus(), ReplicationStatus.Status.SUCCESS);
- Assert.assertEquals(dbStatus.getTableStatuses().get("table2").getEventId(), -1);
- Assert.assertEquals(dbStatus.getTableStatuses().get("table2").getStatus(), ReplicationStatus.Status.INIT);
- Assert.assertEquals(dbStatus.getTableStatuses().get("table3").getEventId(), 10);
- Assert.assertEquals(dbStatus.getTableStatuses().get("table3").getStatus(), ReplicationStatus.Status.FAILURE);
-
-
- }
-
- public void wrongDBForTableTest() throws Exception {
-
- ReplicationStatus newDbStatus = new ReplicationStatus("source", "target", "jobname",
- "wrongDb", null, ReplicationStatus.Status.FAILURE, 20L);
- new DBReplicationStatus(newDbStatus);
-
- try {
- new DBReplicationStatus(newDbStatus, tableStatuses);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Cannot set status for table default1.table1, It does not belong to DB wrongdb");
- }
-
- String jsonString = "{\n" + " \"db_status\": {\n"
- + " \"sourceUri\": \"source\",\n" + " \"targetUri\": \"target\",\n"
- + " \"jobName\": \"jobname\",\n" + " \"database\": \"wrongdb\",\n"
- + " \"status\": \"FAILURE\",\n" + " \"eventId\": 20\n" + " },\n"
- + " \"table_status\": {\"table1\": {\n" + " \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n" + " \"jobName\": \"jobname\",\n"
- + " \"database\": \"default1\",\n" + " \"table\": \"table1\",\n"
- + " \"status\": \"SUCCESS\",\n" + " \"eventId\": 20\n" + " }}\n" + "}";
-
- try {
- new DBReplicationStatus(jsonString);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Unable to create DBReplicationStatus from JsonString. Cannot set status for "
- + "table default1.table1, It does not belong to DB wrongdb");
- }
- }
-
- public void updateTableStatusTest() throws Exception {
- DBReplicationStatus replicationStatus = new DBReplicationStatus(dbReplicationStatus, tableStatuses);
- replicationStatus.updateTableStatus(tableStatus1);
-
- // wrong DB test
- try {
- replicationStatus.updateTableStatus(new ReplicationStatus("source", "target", "jobname",
- "wrongDB", "table2", ReplicationStatus.Status.INIT, -1L));
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Cannot update Table Status. TableDB wrongdb does not match current DB default1");
- }
-
- // wrong status test
- try {
- replicationStatus.updateTableStatus(dbReplicationStatus);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Cannot update Table Status. Table name is empty.");
- }
-
- }
-
- public void updateDBStatusTest() throws Exception {
- DBReplicationStatus replicationStatus = new DBReplicationStatus(dbReplicationStatus, tableStatuses);
- replicationStatus.updateDbStatus(dbReplicationStatus);
-
- // wrong DB test
- try {
- replicationStatus.updateDbStatus(new ReplicationStatus("source", "target", "jobname",
- "wrongDB", null, ReplicationStatus.Status.INIT, -1L));
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Cannot update Database Status. StatusDB wrongdb does not match current DB default1");
- }
-
- // wrong status test
- try {
- replicationStatus.updateDbStatus(tableStatus1);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Cannot update DB Status. This is table level status.");
- }
- }
-
- public void updateDbStatusFromTableStatusesTest() throws Exception {
-
- ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname",
- "default1", null, ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname",
- "default1", "table1", ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table2 = new ReplicationStatus("source", "target", "jobname",
- "Default1", "table2", ReplicationStatus.Status.INIT, -1L);
- ReplicationStatus table3 = new ReplicationStatus("source", "target", "jobname",
- "default1", "Table3", ReplicationStatus.Status.FAILURE, 15L);
- ReplicationStatus table4 = new ReplicationStatus("source", "target", "jobname",
- "Default1", "Table4", ReplicationStatus.Status.FAILURE, 18L);
- Map<String, ReplicationStatus> tables = new HashMap<String, ReplicationStatus>();
-
- tables.put("table1", table1);
- tables.put("table2", table2);
- tables.put("table3", table3);
- tables.put("table4", table4);
-
- // If there is a failure, the last eventId should be the lowest eventId of the failed tables.
- DBReplicationStatus status = new DBReplicationStatus(dbStatus, tables);
- Assert.assertEquals(status.getDatabaseStatus().getEventId(), 20);
- Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS);
- status.updateDbStatusFromTableStatuses();
- Assert.assertEquals(status.getDatabaseStatus().getEventId(), 15);
- Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.FAILURE);
-
- // If all tables succeed, last eventId should be highest eventId of success tables
- table3 = new ReplicationStatus("source", "target", "jobname",
- "default1", "table3", ReplicationStatus.Status.SUCCESS, 25L);
- table4 = new ReplicationStatus("source", "target", "jobname",
- "default1", "table4", ReplicationStatus.Status.SUCCESS, 22L);
- tables.put("Table3", table3);
- tables.put("Table4", table4);
- status = new DBReplicationStatus(dbStatus, tables);
- status.updateDbStatusFromTableStatuses();
- Assert.assertEquals(status.getDatabaseStatus().getEventId(), 25);
- Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS);
-
- // Init tables should not change DB status.
- Map<String, ReplicationStatus> initOnlyTables = new HashMap<String, ReplicationStatus>();
- initOnlyTables.put("table2", table2);
- dbStatus = new ReplicationStatus("source", "target", "jobname",
- "default1", null, ReplicationStatus.Status.SUCCESS, 20L);
- status = new DBReplicationStatus(dbStatus, initOnlyTables);
- Assert.assertEquals(status.getDatabaseStatus().getEventId(), 20);
- Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS);
- status.updateDbStatusFromTableStatuses();
- Assert.assertEquals(status.getDatabaseStatus().getEventId(), 20);
- Assert.assertEquals(status.getDatabaseStatus().getStatus(), ReplicationStatus.Status.SUCCESS);
-
-
- }
-
-}
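
For reference, the rollup rule exercised above (a failed table forces the DB status to FAILURE at the lowest failed eventId; if every table succeeded, the DB takes the highest successful eventId; INIT-only tables leave it untouched) can be shown in a minimal standalone sketch. It assumes DBReplicationStatus and ReplicationStatus live in org.apache.falcon.hive.util with the constructor signatures used in the tests above; it is an illustration, not part of the removed sources.

import java.util.HashMap;
import java.util.Map;

import org.apache.falcon.hive.util.DBReplicationStatus;
import org.apache.falcon.hive.util.ReplicationStatus;

public class DbStatusRollupSketch {
    public static void main(String[] args) throws Exception {
        // DB-level status starts out as SUCCESS at eventId 20.
        ReplicationStatus db = new ReplicationStatus("source", "target", "jobname",
                "default1", null, ReplicationStatus.Status.SUCCESS, 20L);

        Map<String, ReplicationStatus> tables = new HashMap<String, ReplicationStatus>();
        tables.put("table1", new ReplicationStatus("source", "target", "jobname",
                "default1", "table1", ReplicationStatus.Status.SUCCESS, 20L));
        tables.put("table2", new ReplicationStatus("source", "target", "jobname",
                "default1", "table2", ReplicationStatus.Status.FAILURE, 15L));

        DBReplicationStatus status = new DBReplicationStatus(db, tables);
        status.updateDbStatusFromTableStatuses();

        // One failed table pulls the DB down to FAILURE at the lowest failed
        // eventId, so replication can resume from the earliest missed event.
        System.out.println(status.getDatabaseStatus().getStatus());   // FAILURE
        System.out.println(status.getDatabaseStatus().getEventId());  // 15
    }
}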
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java
deleted file mode 100644
index 1f44b62..0000000
--- a/addons/hivedr/src/test/java/org/apache/falcon/hive/DRTest.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-/**
- * Test class for DR.
- */
-public class DRTest {
-    public void testHiveDr() {
- String[] testArgs = {
- "-sourceMetastoreUri", "thrift://localhost:9083",
- "-sourceDatabase", "default",
- "-sourceTable", "test",
- "-sourceStagingPath", "/apps/hive/tools/dr",
- "-sourceNN", "hdfs://localhost:8020",
- "-sourceRM", "local",
-
- "-targetMetastoreUri", "thrift://localhost:9083",
- "-targetStagingPath", "/apps/hive/tools/dr",
- "-targetNN", "hdfs://localhost:8020",
- "-targetRM", "local",
-
- "-maxEvents", "5",
- "-replicationMaxMaps", "1",
- "-distcpMapBandwidth", "4",
- };
- HiveDRTool.main(testArgs);
- }
-}
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java
deleted file mode 100644
index 5bc39df..0000000
--- a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRStatusStoreTest.java
+++ /dev/null
@@ -1,343 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.hive;
-
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.hadoop.JailedFileSystem;
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.falcon.hive.util.DRStatusStore;
-import org.apache.falcon.hive.util.HiveDRStatusStore;
-import org.apache.falcon.hive.util.ReplicationStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.testng.Assert;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-/**
- * Unit tests for HiveDRStatusStore.
- */
-@Test
-public class HiveDRStatusStoreTest {
- private HiveDRStatusStore drStatusStore;
- private FileSystem fileSystem = new JailedFileSystem();
-
- public HiveDRStatusStoreTest() throws Exception {
- EmbeddedCluster cluster = EmbeddedCluster.newCluster("hiveReplTest");
- Path storePath = new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH);
-
- fileSystem.initialize(LocalFileSystem.getDefaultUri(cluster.getConf()), cluster.getConf());
- if (fileSystem.exists(storePath)) {
- fileSystem.delete(storePath, true);
- }
- FileSystem.mkdirs(fileSystem, storePath, DRStatusStore.DEFAULT_STORE_PERMISSION);
- drStatusStore = new HiveDRStatusStore(fileSystem, fileSystem.getFileStatus(storePath).getGroup());
- }
-
- @BeforeClass
- public void updateReplicationStatusTest() throws Exception {
- ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname",
- "Default1", null, ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname",
- "Default1", "table1", ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table2 = new ReplicationStatus("source", "target", "jobname",
- "default1", "Table2", ReplicationStatus.Status.INIT, -1L);
- ReplicationStatus table3 = new ReplicationStatus("source", "target", "jobname",
- "Default1", "Table3", ReplicationStatus.Status.FAILURE, 15L);
- ReplicationStatus table4 = new ReplicationStatus("source", "target", "jobname",
- "default1", "table4", ReplicationStatus.Status.FAILURE, 18L);
- ArrayList<ReplicationStatus> replicationStatusList = new ArrayList<ReplicationStatus>();
- replicationStatusList.add(table1);
- replicationStatusList.add(table2);
- replicationStatusList.add(table3);
- replicationStatusList.add(table4);
- replicationStatusList.add(dbStatus);
- drStatusStore.updateReplicationStatus("jobname", replicationStatusList);
- }
-
- @Test(expectedExceptions = IOException.class,
- expectedExceptionsMessageRegExp = ".*does not have correct ownership/permissions.*")
- public void testDrStatusStoreWithFakeUser() throws IOException {
- new HiveDRStatusStore(fileSystem, "fakeGroup");
- }
-
- public void updateReplicationStatusNewTablesTest() throws Exception {
- ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname2",
- "default2", null, ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname2",
- "Default2", "table1", ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table2 = new ReplicationStatus("source", "target", "jobname2",
- "default2", "Table2", ReplicationStatus.Status.INIT, -1L);
- ReplicationStatus table3 = new ReplicationStatus("source", "target", "jobname2",
- "default2", "table3", ReplicationStatus.Status.FAILURE, 15L);
- ReplicationStatus table4 = new ReplicationStatus("source", "target", "jobname2",
- "Default2", "Table4", ReplicationStatus.Status.FAILURE, 18L);
- ArrayList<ReplicationStatus> replicationStatusList = new ArrayList<ReplicationStatus>();
- replicationStatusList.add(table1);
- replicationStatusList.add(table2);
- replicationStatusList.add(table3);
- replicationStatusList.add(table4);
- replicationStatusList.add(dbStatus);
-
- drStatusStore.updateReplicationStatus("jobname2", replicationStatusList);
- ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", "jobname2", "default2");
- Assert.assertEquals(status.getEventId(), 15);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE);
- Assert.assertEquals(status.getJobName(), "jobname2");
- Assert.assertEquals(status.getTable(), null);
- Assert.assertEquals(status.getSourceUri(), "source");
-
- Iterator<ReplicationStatus> iter = drStatusStore.getTableReplicationStatusesInDb("source", "target",
- "jobname2", "default2");
- int size = 0;
- while(iter.hasNext()) {
- iter.next();
- size++;
- }
- Assert.assertEquals(4, size);
-
- table3 = new ReplicationStatus("source", "target", "jobname2",
- "default2", "table3", ReplicationStatus.Status.SUCCESS, 25L);
- table4 = new ReplicationStatus("source", "target", "jobname2",
- "Default2", "table4", ReplicationStatus.Status.SUCCESS, 22L);
- ReplicationStatus table5 = new ReplicationStatus("source", "target", "jobname2",
- "default2", "Table5", ReplicationStatus.Status.SUCCESS, 18L);
- ReplicationStatus db1table1 = new ReplicationStatus("source", "target", "jobname2",
- "Default1", "Table1", ReplicationStatus.Status.SUCCESS, 18L);
- replicationStatusList = new ArrayList<ReplicationStatus>();
- replicationStatusList.add(table5);
- replicationStatusList.add(table3);
- replicationStatusList.add(table4);
- replicationStatusList.add(db1table1);
-
- drStatusStore.updateReplicationStatus("jobname2", replicationStatusList);
- status = drStatusStore.getReplicationStatus("source", "target", "jobname2", "default1");
- Assert.assertEquals(status.getEventId(), 18);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.SUCCESS);
-
- status = drStatusStore.getReplicationStatus("source", "target", "jobname2", "default2");
- Assert.assertEquals(status.getEventId(), 25);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.SUCCESS);
-
- iter = drStatusStore.getTableReplicationStatusesInDb("source", "target",
- "jobname2", "default2");
- size = 0;
- while(iter.hasNext()) {
- iter.next();
- size++;
- }
- Assert.assertEquals(5, size);
- }
-
- public void getReplicationStatusDBTest() throws HiveReplicationException {
- ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", "jobname", "Default1");
- Assert.assertEquals(status.getEventId(), 15);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE);
- Assert.assertEquals(status.getJobName(), "jobname");
- Assert.assertEquals(status.getTable(), null);
- Assert.assertEquals(status.getSourceUri(), "source");
- }
-
- public void checkReplicationConflictTest() throws HiveReplicationException {
-
- try {
- //same source, same job, same DB, null table : pass
- drStatusStore.checkForReplicationConflict("source", "jobname", "default1", null);
-
- //same source, same job, same DB, same table : pass
- drStatusStore.checkForReplicationConflict("source", "jobname", "default1", "table1");
-
- //same source, same job, different DB, null table : pass
- drStatusStore.checkForReplicationConflict("source", "jobname", "diffDB", null);
-
- //same source, same job, different DB, different table : pass
- drStatusStore.checkForReplicationConflict("source", "jobname", "diffDB", "diffTable");
-
- // same source, different job, same DB, diff table : pass
- drStatusStore.checkForReplicationConflict("source", "diffJob", "default1", "diffTable");
- } catch (Exception e) {
- Assert.fail(e.getMessage());
- }
-
- try {
- // different source, same job, same DB, null table : fail
- drStatusStore.checkForReplicationConflict("diffSource", "jobname", "default1", null);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Two different sources are attempting to replicate to same db default1."
- + " New Source = diffSource, Existing Source = source");
- }
-
- try {
- // same source, different job, same DB, null table : fail
- drStatusStore.checkForReplicationConflict("source", "diffJob", "default1", null);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Two different jobs are attempting to replicate to same db default1."
- + " New Job = diffJob, Existing Job = jobname");
- }
-
- try {
- // same source, different job, same DB, same table : fail
- drStatusStore.checkForReplicationConflict("source", "diffJob", "default1", "table1");
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Two different jobs are trying to replicate to same table table1."
- + " New job = diffJob, Existing job = jobname");
- }
-
-
- }
-
- public void deleteReplicationStatusTest() throws Exception {
- ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "deleteJob",
- "deleteDB", null, ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table1 = new ReplicationStatus("source", "target", "deleteJob",
- "deleteDB", "Table1", ReplicationStatus.Status.SUCCESS, 20L);
- ArrayList<ReplicationStatus> replicationStatusList = new ArrayList<ReplicationStatus>();
- replicationStatusList.add(table1);
- replicationStatusList.add(dbStatus);
- drStatusStore.updateReplicationStatus("deleteJob", replicationStatusList);
-
- ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target", "deleteJob", "deleteDB");
- Path statusPath = drStatusStore.getStatusDirPath(status.getDatabase(), status.getJobName());
- Assert.assertEquals(fileSystem.exists(statusPath), true);
-
- drStatusStore.deleteReplicationStatus("deleteJob", "deleteDB");
- Assert.assertEquals(fileSystem.exists(statusPath), false);
- }
-
- public void getReplicationStatusTableTest() throws HiveReplicationException {
- ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target",
- "jobname", "default1", "table1");
- Assert.assertEquals(status.getEventId(), 20);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.SUCCESS);
- Assert.assertEquals(status.getTable(), "table1");
-
- status = drStatusStore.getReplicationStatus("source", "target",
- "jobname", "Default1", "Table2");
- Assert.assertEquals(status.getEventId(), -1);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.INIT);
- Assert.assertEquals(status.getTable(), "table2");
-
- status = drStatusStore.getReplicationStatus("source", "target",
- "jobname", "default1", "Table3");
- Assert.assertEquals(status.getEventId(), 15);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE);
- Assert.assertEquals(status.getTable(), "table3");
-
- status = drStatusStore.getReplicationStatus("source", "target",
- "jobname", "default1", "table4");
- Assert.assertEquals(status.getEventId(), 18);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE);
- Assert.assertEquals(status.getTable(), "table4");
- }
-
- public void getTableReplicationStatusesInDbTest() throws HiveReplicationException {
- Iterator<ReplicationStatus> iter = drStatusStore.getTableReplicationStatusesInDb("source", "target",
- "jobname", "Default1");
- int size = 0;
- while(iter.hasNext()) {
- size++;
- ReplicationStatus status = iter.next();
- if (status.getTable().equals("table3")) {
- Assert.assertEquals(status.getEventId(), 15);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.FAILURE);
- Assert.assertEquals(status.getTable(), "table3");
- }
- }
- Assert.assertEquals(4, size);
- }
-
- public void fileRotationTest() throws Exception {
- // initialize replication status store for db default3.
- // This should init with eventId = -1 and status = INIT
- ReplicationStatus status = drStatusStore.getReplicationStatus("source", "target",
- "jobname3", "default3");
- Assert.assertEquals(status.getEventId(), -1);
- Assert.assertEquals(status.getStatus(), ReplicationStatus.Status.INIT);
-
- // update status 5 times resulting in 6 files : latest.json + five rotated files
- ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname3",
- "Default3", null, ReplicationStatus.Status.SUCCESS, 20L);
- ReplicationStatus table1 = new ReplicationStatus("source", "target", "jobname3",
- "default3", "Table1", ReplicationStatus.Status.SUCCESS, 20L);
- ArrayList<ReplicationStatus> replicationStatusList = new ArrayList<ReplicationStatus>();
- replicationStatusList.add(table1);
- replicationStatusList.add(dbStatus);
-
- for(int i=0; i<5; i++) {
- Thread.sleep(2000);
- drStatusStore.updateReplicationStatus("jobname3", replicationStatusList);
- }
-
- status = drStatusStore.getReplicationStatus("source", "target", "jobname3", "default3");
- Path statusPath = drStatusStore.getStatusDirPath(status.getDatabase(), status.getJobName());
- RemoteIterator<LocatedFileStatus> iter = fileSystem.listFiles(statusPath, false);
- Assert.assertEquals(getRemoteIterSize(iter), 6);
-
- drStatusStore.rotateStatusFiles(statusPath, 3, 10000000);
- iter = fileSystem.listFiles(statusPath, false);
- Assert.assertEquals(getRemoteIterSize(iter), 6);
-
- drStatusStore.rotateStatusFiles(statusPath, 3, 6000);
- iter = fileSystem.listFiles(statusPath, false);
- Assert.assertEquals(getRemoteIterSize(iter), 3);
- }
-
- public void wrongJobNameTest() throws Exception {
- ReplicationStatus dbStatus = new ReplicationStatus("source", "target", "jobname3",
- "Default3", null, ReplicationStatus.Status.SUCCESS, 20L);
- ArrayList<ReplicationStatus> replicationStatusList = new ArrayList<ReplicationStatus>();
- replicationStatusList.add(dbStatus);
-
- try {
- drStatusStore.updateReplicationStatus("jobname2", replicationStatusList);
- Assert.fail();
- } catch (HiveReplicationException e) {
- // Expected exception due to jobname mismatch
- }
- }
-
- @AfterClass
- public void cleanUp() throws IOException {
- fileSystem.delete(new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH), true);
- }
-
- private int getRemoteIterSize(RemoteIterator<LocatedFileStatus> iter) throws IOException {
- int size = 0;
- while(iter.hasNext()) {
- iter.next();
- size++;
- }
- return size;
- }
-
-
-}
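
Taken together, the tests above pin down the store contract: statuses are written in batches per job, rolled up per database, and guarded against conflicting writers. A minimal usage sketch follows, assuming an already initialized Hadoop FileSystem such as the jailed one built in the constructor above; the job, database, and table names here are hypothetical.

import java.util.ArrayList;

import org.apache.falcon.hive.util.HiveDRStatusStore;
import org.apache.falcon.hive.util.ReplicationStatus;
import org.apache.hadoop.fs.FileSystem;

public class StatusStoreSketch {
    static void recordAndQuery(FileSystem fileSystem, String group) throws Exception {
        HiveDRStatusStore store = new HiveDRStatusStore(fileSystem, group);

        // Persist one run's statuses: per-table entries plus a DB-level
        // entry (table == null), all under the same job name.
        ArrayList<ReplicationStatus> batch = new ArrayList<ReplicationStatus>();
        batch.add(new ReplicationStatus("source", "target", "job1",
                "salesdb", "orders", ReplicationStatus.Status.SUCCESS, 20L));
        batch.add(new ReplicationStatus("source", "target", "job1",
                "salesdb", null, ReplicationStatus.Status.SUCCESS, 20L));
        store.updateReplicationStatus("job1", batch);

        // Fail fast if another job or another source already owns this DB.
        store.checkForReplicationConflict("source", "job1", "salesdb", "orders");

        // Read back the rolled-up DB-level status.
        ReplicationStatus db = store.getReplicationStatus("source", "target", "job1", "salesdb");
        System.out.println(db.getStatus() + " @ " + db.getEventId());
    }
}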
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java
deleted file mode 100644
index cdeddaa..0000000
--- a/addons/hivedr/src/test/java/org/apache/falcon/hive/HiveDRTest.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.falcon.hive;
-
-import com.google.common.base.Function;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.IOUtils;
-import org.apache.falcon.cluster.util.EmbeddedCluster;
-import org.apache.falcon.hadoop.JailedFileSystem;
-import org.apache.falcon.hive.util.DRStatusStore;
-import org.apache.falcon.hive.util.DelimiterUtils;
-import org.apache.falcon.hive.util.EventSourcerUtils;
-import org.apache.falcon.hive.util.HiveDRStatusStore;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hive.hcatalog.api.HCatClient;
-import org.apache.hive.hcatalog.api.HCatNotificationEvent;
-import org.apache.hive.hcatalog.api.repl.Command;
-import org.apache.hive.hcatalog.api.repl.ReplicationTask;
-import org.apache.hive.hcatalog.api.repl.ReplicationUtils;
-import org.apache.hive.hcatalog.api.repl.StagingDirectoryProvider;
-import org.apache.hive.hcatalog.common.HCatConstants;
-import org.apache.hive.hcatalog.messaging.MessageFactory;
-import org.testng.Assert;
-import org.testng.annotations.AfterMethod;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import javax.annotation.Nullable;
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Test for Hive DR export and import.
- */
-public class HiveDRTest {
- private FileSystem fileSystem;
- private HCatClient client;
- private MetaStoreEventSourcer sourcer;
- private EmbeddedCluster cluster;
- private String dbName = "testdb";
- private String tableName = "testtable";
- private StagingDirectoryProvider stagingDirectoryProvider;
- private MessageFactory msgFactory = MessageFactory.getInstance();
-
- @BeforeMethod
- public void setup() throws Exception {
- client = HCatClient.create(new HiveConf());
- initializeFileSystem();
- sourcer = new MetaStoreEventSourcer(client, null, new EventSourcerUtils(cluster.getConf(),
- false, "hiveReplTest"), null);
- stagingDirectoryProvider = new StagingDirectoryProvider.TrivialImpl("/tmp", "/");
- }
-
- private void initializeFileSystem() throws Exception {
- cluster = EmbeddedCluster.newCluster("hivedr");
- fileSystem = new JailedFileSystem();
- Path storePath = new Path(DRStatusStore.BASE_DEFAULT_STORE_PATH);
- fileSystem.initialize(LocalFileSystem.getDefaultUri(cluster.getConf()), cluster.getConf());
- if (fileSystem.exists(storePath)) {
- fileSystem.delete(storePath, true);
- }
- FileSystem.mkdirs(fileSystem, storePath, DRStatusStore.DEFAULT_STORE_PERMISSION);
- HiveDRStatusStore drStatusStore = new HiveDRStatusStore(fileSystem,
- fileSystem.getFileStatus(storePath).getGroup());
- }
-
- // Dummy mapping used for all db and table name mappings
- private Function<String, String> debugMapping = new Function<String, String>(){
- @Nullable
- @Override
- public String apply(@Nullable String s) {
- if (s == null){
- return null;
- } else {
- StringBuilder sb = new StringBuilder(s);
- return sb.toString() + sb.reverse().toString();
- }
- }
- };
-
- @Test
- public void testExportImportReplication() throws Exception {
- Table t = new Table();
- t.setDbName(dbName);
- t.setTableName(tableName);
- NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
- HCatConstants.HCAT_CREATE_TABLE_EVENT, msgFactory.buildCreateTableMessage(t).toString());
- event.setDbName(t.getDbName());
- event.setTableName(t.getTableName());
-
- HCatNotificationEvent hev = new HCatNotificationEvent(event);
- ReplicationTask rtask = ReplicationTask.create(client, hev);
-
- Assert.assertEquals(hev.toString(), rtask.getEvent().toString());
- verifyExportImportReplicationTask(rtask);
- }
-
- private void verifyExportImportReplicationTask(ReplicationTask rtask) throws Exception {
- Assert.assertEquals(true, rtask.needsStagingDirs());
- Assert.assertEquals(false, rtask.isActionable());
-
- rtask.withSrcStagingDirProvider(stagingDirectoryProvider)
- .withDstStagingDirProvider(stagingDirectoryProvider)
- .withDbNameMapping(debugMapping)
- .withTableNameMapping(debugMapping);
-
- List<ReplicationTask> taskAdd = new ArrayList<ReplicationTask>();
- taskAdd.add(rtask);
- sourcer.processTableReplicationEvents(taskAdd.iterator(), dbName, tableName,
- stagingDirectoryProvider.toString(), stagingDirectoryProvider.toString());
-
- String metaFileName = sourcer.persistToMetaFile("hiveReplTest");
- String event = readEventFile(new Path(metaFileName));
- Assert.assertEquals(event.split(DelimiterUtils.FIELD_DELIM).length, 4);
- Assert.assertEquals(dbName,
- new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[0]), "UTF-8"));
- Assert.assertEquals(tableName,
- new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[1]), "UTF-8"));
-
- String exportStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[2]));
- String[] commandList = exportStr.split(DelimiterUtils.NEWLINE_DELIM);
- for (String command : commandList) {
- Command cmd = ReplicationUtils.deserializeCommand(command);
- Assert.assertEquals(cmd.getEventId(), 42);
- for(String stmt : cmd.get()) {
- Assert.assertTrue(stmt.startsWith("EXPORT TABLE"));
- }
- }
-
- String importStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[3]));
- commandList = importStr.split(DelimiterUtils.NEWLINE_DELIM);
- for (String command : commandList) {
- Command cmd = ReplicationUtils.deserializeCommand(command);
- Assert.assertEquals(cmd.getEventId(), 42);
- for (String stmt : cmd.get()) {
- Assert.assertTrue(stmt.startsWith("IMPORT TABLE"));
- }
- }
- }
-
- @Test
- public void testImportReplication() throws Exception {
- Table t = new Table();
- t.setDbName("testdb");
- t.setTableName("testtable");
- NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
- HCatConstants.HCAT_DROP_TABLE_EVENT, msgFactory.buildDropTableMessage(t).toString());
- event.setDbName(t.getDbName());
- event.setTableName(t.getTableName());
-
- HCatNotificationEvent hev = new HCatNotificationEvent(event);
- ReplicationTask rtask = ReplicationTask.create(client, hev);
-
- Assert.assertEquals(hev.toString(), rtask.getEvent().toString());
- verifyImportReplicationTask(rtask);
- }
-
- private void verifyImportReplicationTask(ReplicationTask rtask) throws Exception {
- Assert.assertEquals(false, rtask.needsStagingDirs());
- Assert.assertEquals(true, rtask.isActionable());
- rtask.withDbNameMapping(debugMapping)
- .withTableNameMapping(debugMapping);
-
- List<ReplicationTask> taskAdd = new ArrayList<ReplicationTask>();
- taskAdd.add(rtask);
- sourcer.processTableReplicationEvents(taskAdd.iterator(), dbName, tableName,
- stagingDirectoryProvider.toString(), stagingDirectoryProvider.toString());
- String persistFileName = sourcer.persistToMetaFile("hiveReplTest");
- String event = readEventFile(new Path(persistFileName));
-
- Assert.assertEquals(event.split(DelimiterUtils.FIELD_DELIM).length, 4);
- Assert.assertEquals(dbName,
- new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[0]), "UTF-8"));
- Assert.assertEquals(tableName,
- new String(Base64.decodeBase64(event.split(DelimiterUtils.FIELD_DELIM)[1]), "UTF-8"));
-
- String exportStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[2]));
- String[] commandList = exportStr.split(DelimiterUtils.NEWLINE_DELIM);
- for (String command : commandList) {
- Command cmd = ReplicationUtils.deserializeCommand(command);
- Assert.assertEquals(cmd.getEventId(), 42);
-            Assert.assertEquals(cmd.get().size(), 0); // For a drop event the export command list is empty: it is a metadata-only operation
- }
-
- String importStr = readEventFile(new Path(event.split(DelimiterUtils.FIELD_DELIM)[3]));
- commandList = importStr.split(DelimiterUtils.NEWLINE_DELIM);
- for (String command : commandList) {
- Command cmd = ReplicationUtils.deserializeCommand(command);
- Assert.assertEquals(cmd.getEventId(), 42);
- for (String stmt : cmd.get()) {
- Assert.assertTrue(stmt.startsWith("DROP TABLE"));
- }
- }
- }
-
- private long getEventId() {
- // Does not need to be unique, just non-zero distinct value to test against.
- return 42;
- }
-
- private int getTime() {
- // Does not need to be actual time, just non-zero distinct value to test against.
- return 1729;
- }
-
- private String readEventFile(Path eventFileName) throws IOException {
- StringBuilder eventString = new StringBuilder();
- BufferedReader in = new BufferedReader(new InputStreamReader(
- fileSystem.open(eventFileName)));
- try {
- String line;
- while ((line=in.readLine())!=null) {
- eventString.append(line);
- }
- } catch (Exception e) {
- throw new IOException(e);
- } finally {
- IOUtils.closeQuietly(in);
- }
- return eventString.toString();
- }
-
- @AfterMethod
- public void tearDown() throws Exception {
- client.close();
- }
-
-}
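
As the assertions above imply, persistToMetaFile writes one delimited record per table: the base64-encoded db name, the base64-encoded table name, then the paths of the export and import command files. A small parsing sketch under that assumption, using the same DelimiterUtils and commons-codec Base64 the test uses:

import org.apache.commons.codec.binary.Base64;
import org.apache.falcon.hive.util.DelimiterUtils;

public class MetaFileRecordSketch {
    // 'line' is one record as read back by readEventFile() above.
    static void dump(String line) throws Exception {
        String[] fields = line.split(DelimiterUtils.FIELD_DELIM);
        System.out.println("db     = " + new String(Base64.decodeBase64(fields[0]), "UTF-8"));
        System.out.println("table  = " + new String(Base64.decodeBase64(fields[1]), "UTF-8"));
        System.out.println("export = " + fields[2]);  // file of serialized EXPORT commands
        System.out.println("import = " + fields[3]);  // file of serialized IMPORT commands
    }
}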
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java
----------------------------------------------------------------------
diff --git a/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java b/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java
deleted file mode 100644
index a02639c..0000000
--- a/addons/hivedr/src/test/java/org/apache/falcon/hive/ReplicationStatusTest.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.falcon.hive;
-
-import org.apache.falcon.hive.exception.HiveReplicationException;
-import org.apache.falcon.hive.util.ReplicationStatus;
-import org.testng.Assert;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
-
-/**
- * Unit tests for ReplicationStatus.
- */
-@Test
-public class ReplicationStatusTest {
-
- private ReplicationStatus dbStatus, tableStatus;
-
- public ReplicationStatusTest() {}
-
-
- @BeforeClass
- public void prepare() throws Exception {
- dbStatus = new ReplicationStatus("source", "target", "jobname",
- "default1", null, ReplicationStatus.Status.INIT, 0L);
- tableStatus = new ReplicationStatus("source", "target", "jobname",
- "testDb", "Table1", ReplicationStatus.Status.SUCCESS, 0L);
- }
-
- public void replicationStatusSerializeTest() throws Exception {
- String expected = "{\n \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n \"jobName\": \"jobname\",\n"
- + " \"database\": \"testdb\",\n \"table\": \"table1\",\n"
- + " \"status\": \"SUCCESS\",\n \"eventId\": 0\n}";
- String actual = tableStatus.toJsonString();
- Assert.assertEquals(actual, expected);
-
- expected = "{\n \"sourceUri\": \"source\",\n \"targetUri\": \"target\",\n"
- + " \"jobName\": \"jobname\",\n \"database\": \"default1\",\n"
- + " \"status\": \"INIT\",\n \"eventId\": 0\n}";
- actual = dbStatus.toJsonString();
- Assert.assertEquals(actual, expected);
- }
-
- public void replicationStatusDeserializeTest() throws Exception {
- String tableInput = "{\n \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n"
- + " \"database\": \"Test1\",\n \"table\": \"table1\",\n"
- + " \"status\": \"SUCCESS\",\n \"eventId\": 0\n}";
- String dbInput = "{ \"sourceUri\": \"source\", \"targetUri\": \"target\",\"jobName\": \"jobname\",\n"
- + " \"database\": \"default1\", \"status\": \"FAILURE\","
- + " \"eventId\": 27, \"statusLog\": \"testLog\"}";
-
- ReplicationStatus newDbStatus = new ReplicationStatus(dbInput);
- ReplicationStatus newTableStatus = new ReplicationStatus(tableInput);
-
- Assert.assertEquals(newDbStatus.getTable(), null);
- Assert.assertEquals(newDbStatus.getEventId(), 27);
- Assert.assertEquals(newDbStatus.getDatabase(), "default1");
- Assert.assertEquals(newDbStatus.getLog(), "testLog");
- Assert.assertEquals(newDbStatus.getStatus(), ReplicationStatus.Status.FAILURE);
-
-
- Assert.assertEquals(newTableStatus.getTable(), "table1");
- Assert.assertEquals(newTableStatus.getEventId(), 0);
- Assert.assertEquals(newTableStatus.getDatabase(), "test1");
- Assert.assertEquals(newTableStatus.getJobName(), "testJob");
-
- // no table, no eventId, no log
- dbInput = "{\n \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n"
- + " \"database\": \"Test1\",\n"
- + " \"status\": \"SUCCESS\"\n}";
- newDbStatus = new ReplicationStatus(dbInput);
-
- Assert.assertEquals(newDbStatus.getDatabase(), "test1");
- Assert.assertEquals(newDbStatus.getTable(), null);
- Assert.assertEquals(newDbStatus.getEventId(), -1);
- Assert.assertEquals(newDbStatus.getLog(), null);
-
- }
-
- public void invalidEventIdTest() throws Exception {
- String tableInput = "{\n \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n"
- + " \"database\": \"test1\",\n \"table\": \"table1\",\n"
- + " \"status\": \"SUCCESS\",\n \"eventId\": -100\n}";
-
- ReplicationStatus newTableStatus = new ReplicationStatus(tableInput);
- Assert.assertEquals(newTableStatus.getEventId(), -1);
-
- newTableStatus.setEventId(-200);
- Assert.assertEquals(newTableStatus.getEventId(), -1);
-
- String expected = "{\n \"sourceUri\": \"source\",\n"
- + " \"targetUri\": \"target\",\n \"jobName\": \"testJob\",\n"
- + " \"database\": \"test1\",\n \"table\": \"table1\",\n"
- + " \"status\": \"SUCCESS\",\n \"eventId\": -1\n}";
- String actual = newTableStatus.toJsonString();
- Assert.assertEquals(actual, expected);
-
- newTableStatus.setEventId(50);
- Assert.assertEquals(newTableStatus.getEventId(), 50);
- }
-
- public void invalidStatusTest() throws Exception {
-
- String dbInput = "{ \"sourceUri\": \"source\", \"targetUri\": \"target\",\"jobName\": \"jobname\",\n"
- + " \"database\": \"default1\", \"status\": \"BLAH\","
- + " \"eventId\": 27, \"statusLog\": \"testLog\"}";
-
- try {
- new ReplicationStatus(dbInput);
- Assert.fail();
- } catch (HiveReplicationException e) {
- Assert.assertEquals(e.getMessage(),
- "Unable to deserialize jsonString to ReplicationStatus. Invalid status BLAH");
- }
- }
-
-
-}
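
The behavior these tests pin down is a simple JSON round trip: database and table names are lower-cased on construction, and any eventId below -1 is coerced to -1. A minimal sketch, with hypothetical names:

import org.apache.falcon.hive.util.ReplicationStatus;

public class RoundTripSketch {
    public static void main(String[] args) throws Exception {
        ReplicationStatus status = new ReplicationStatus("source", "target", "job1",
                "SalesDB", "Orders", ReplicationStatus.Status.SUCCESS, 42L);

        // Serialize and parse back through the JSON-string constructor.
        ReplicationStatus copy = new ReplicationStatus(status.toJsonString());

        System.out.println(copy.getDatabase());  // salesdb (lower-cased)
        System.out.println(copy.getTable());     // orders  (lower-cased)
        System.out.println(copy.getEventId());   // 42

        copy.setEventId(-200);
        System.out.println(copy.getEventId());   // -1 (invalid ids are coerced)
    }
}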
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/README.txt
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/README.txt b/addons/recipes/hdfs-replication/README.txt
deleted file mode 100644
index 5742d43..0000000
--- a/addons/recipes/hdfs-replication/README.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDFS Directory Replication Recipe
-
-Overview
-This recipe replicates arbitrary directories on HDFS from one
-Hadoop cluster to another.
-It piggybacks on the replication solution in Falcon, which uses the DistCp tool.
-
-Use Case
-* Copy directories with out-of-date partitions between HDFS clusters
-* Archive directories from HDFS to cloud storage, e.g. S3 or Azure WASB
-
-Limitations
-As the data volume and number of files grow, this can get inefficient.
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/pom.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/pom.xml b/addons/recipes/hdfs-replication/pom.xml
deleted file mode 100644
index 98d9795..0000000
--- a/addons/recipes/hdfs-replication/pom.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.apache.falcon.recipes</groupId>
- <artifactId>falcon-hdfs-replication-recipe</artifactId>
- <version>0.10-SNAPSHOT</version>
-    <description>Apache Falcon Sample Hdfs Replication Recipe</description>
- <name>Apache Falcon Sample Hdfs Replication Recipe</name>
- <packaging>jar</packaging>
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml
deleted file mode 100644
index 441a189..0000000
--- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-template.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<process name="##falcon.recipe.job.name##" xmlns="uri:falcon:process:0.1">
- <clusters>
- <!-- source -->
- <cluster name="##falcon.recipe.cluster.name##">
- <validity end="##falcon.recipe.cluster.validity.end##" start="##falcon.recipe.cluster.validity.start##"/>
- </cluster>
- </clusters>
-
- <tags>_falcon_mirroring_type=HDFS</tags>
-
- <parallel>1</parallel>
- <!-- Dir replication needs to run only once to catch up -->
- <order>LAST_ONLY</order>
- <frequency>##falcon.recipe.frequency##</frequency>
- <timezone>UTC</timezone>
-
- <properties>
- <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
- </properties>
-
- <workflow name="##falcon.recipe.workflow.name##" engine="oozie" path="/apps/data-mirroring/workflows/hdfs-replication-workflow.xml" lib="##workflow.lib.path##"/>
- <retry policy="##falcon.recipe.retry.policy##" delay="##falcon.recipe.retry.delay##" attempts="3"/>
- <notification type="##falcon.recipe.notification.type##" to="##falcon.recipe.notification.receivers##"/>
- <ACL/>
-</process>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml
deleted file mode 100644
index c1966be..0000000
--- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication-workflow.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-fs-workflow'>
- <start to='dr-replication'/>
- <!-- Replication action -->
- <action name="dr-replication">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp</value>
- </property>
- <property>
- <name>oozie.launcher.oozie.libpath</name>
- <value>${wf:conf("falcon.libpath")}</value>
- </property>
- <property>
- <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
- <value>${drSourceClusterFS},${drTargetClusterFS}</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.replication.FeedReplicator</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-maxMaps</arg>
- <arg>${distcpMaxMaps}</arg>
- <arg>-mapBandwidth</arg>
- <arg>${distcpMapBandwidth}</arg>
- <arg>-sourcePaths</arg>
- <arg>${drSourceDir}</arg>
- <arg>-targetPath</arg>
- <arg>${drTargetClusterFS}${drTargetDir}</arg>
- <arg>-falconFeedStorageType</arg>
- <arg>FILESYSTEM</arg>
- <arg>-availabilityFlag</arg>
- <arg>${availabilityFlag == 'NA' ? "NA" : availabilityFlag}</arg>
- <arg>-counterLogDir</arg>
- <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}</arg>
- </java>
- <ok to="end"/>
- <error to="fail"/>
- </action>
- <kill name="fail">
- <message>
- Workflow action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]
- </message>
- </kill>
- <end name="end"/>
-</workflow-app>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties
----------------------------------------------------------------------
diff --git a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties b/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties
deleted file mode 100644
index 4642835..0000000
--- a/addons/recipes/hdfs-replication/src/main/resources/hdfs-replication.properties
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-##### NOTE: This is a TEMPLATE file which can be copied and edited
-
-##### Recipe properties
-##### Unique recipe job name
-falcon.recipe.name=sales-monthly
-
-##### Workflow properties
-falcon.recipe.workflow.name=hdfs-dr-workflow
-# Provide Wf absolute path. This can be HDFS or local FS path. If WF is on local FS it will be copied to HDFS
-falcon.recipe.workflow.path=/apps/data-mirroring/workflows/hdfs-replication-workflow.xml
-# Provide Wf lib absolute path. This can be HDFS or local FS path. If libs are on the local FS they will be copied to HDFS
-#falcon.recipe.workflow.lib.path=/recipes/hdfs-replication/lib
-
-##### Cluster properties
-# Cluster where job should run
-falcon.recipe.cluster.name=primaryCluster
-# Change the cluster hdfs write end point here. This is mandatory.
-falcon.recipe.cluster.hdfs.writeEndPoint=hdfs://240.0.0.10:8020
-# Change the cluster validity start time here
-falcon.recipe.cluster.validity.start=2015-03-13T00:00Z
-# Change the cluster validity end time here
-falcon.recipe.cluster.validity.end=2016-12-30T00:00Z
-
-##### Scheduling properties
-# Change the recipe frequency here. Valid frequency types are minutes, hours, days, months
-falcon.recipe.process.frequency=minutes(5)
-
-##### Tag properties - An optional list of comma separated tags, Key Value Pairs, separated by comma
-##### Uncomment to add tags
-#falcon.recipe.tags=
-
-##### Retry policy properties
-
-falcon.recipe.retry.policy=periodic
-falcon.recipe.retry.delay=minutes(30)
-falcon.recipe.retry.attempts=3
-falcon.recipe.retry.onTimeout=false
-
-##### ACL properties - Uncomment and change ACL if authorization is enabled
-
-falcon.recipe.acl.owner=ambari-qa
-falcon.recipe.acl.group=users
-falcon.recipe.acl.permission=0x755
-falcon.recipe.nn.principal=nn/_HOST@EXAMPLE.COM
-
-##### Custom Job properties
-
-# Specify multiple comma separated source directories
-drSourceDir=/user/hrt_qa/dr/test/primaryCluster/input
-drSourceClusterFS=hdfs://240.0.0.10:8020
-drTargetDir=/user/hrt_qa/dr/test/backupCluster/input
-drTargetClusterFS=hdfs://240.0.0.11:8020
-
-# Change it to specify the maximum number of mappers for DistCP
-distcpMaxMaps=1
-# Change it to specify the bandwidth in MB for each mapper in DistCP
-distcpMapBandwidth=100
-
-##### Email Notification for Falcon instance completion
-falcon.recipe.notification.type=email
-falcon.recipe.notification.receivers=NA
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/README.txt
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/README.txt b/addons/recipes/hive-disaster-recovery/README.txt
deleted file mode 100644
index ab393b1..0000000
--- a/addons/recipes/hive-disaster-recovery/README.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Hive Metastore Disaster Recovery Recipe
-
-Overview
-This recipe replicates Hive metadata and data from one
-Hadoop cluster to another.
-It piggybacks on the replication solution in Falcon, which uses the DistCp tool.
-
-Use Case
-*
-*
-
-Limitations
-*
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/pom.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/pom.xml b/addons/recipes/hive-disaster-recovery/pom.xml
deleted file mode 100644
index 0f782d2..0000000
--- a/addons/recipes/hive-disaster-recovery/pom.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.apache.falcon.recipes</groupId>
- <artifactId>falcon-hive-replication-recipe</artifactId>
- <version>0.10-SNAPSHOT</version>
- <description>Apache Falcon Hive Disaster Recovery Recipe</description>
- <name>Apache Falcon Sample Hive Disaster Recovery Recipe</name>
- <packaging>jar</packaging>
-</project>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml
deleted file mode 100644
index f0de091..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-template.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-
-<process name="##name##" xmlns="uri:falcon:process:0.1">
- <clusters>
- <!-- source -->
- <cluster name="##cluster.name##">
- <validity end="##cluster.validity.end##" start="##cluster.validity.start##"/>
- </cluster>
- </clusters>
-
- <tags>_falcon_mirroring_type=HIVE</tags>
-
- <parallel>1</parallel>
- <!-- Replication needs to run only once to catch up -->
- <order>LAST_ONLY</order>
- <frequency>##process.frequency##</frequency>
- <timezone>UTC</timezone>
-
- <properties>
- <property name="oozie.wf.subworkflow.classpath.inheritance" value="true"/>
- </properties>
-
- <workflow name="##workflow.name##" engine="oozie"
- path="/apps/data-mirroring/workflows/hive-disaster-recovery-workflow.xml" lib="##workflow.lib.path##"/>
- <retry policy="##retry.policy##" delay="##retry.delay##" attempts="3"/>
- <notification type="##notification.type##" to="##notification.receivers##"/>
- <ACL/>
-</process>
http://git-wip-us.apache.org/repos/asf/falcon/blob/6f5b476c/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml
----------------------------------------------------------------------
diff --git a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml b/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml
deleted file mode 100644
index 0494cf6..0000000
--- a/addons/recipes/hive-disaster-recovery/src/main/resources/hive-disaster-recovery-secure-workflow.xml
+++ /dev/null
@@ -1,357 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- -->
-<workflow-app xmlns='uri:oozie:workflow:0.3' name='falcon-dr-hive-workflow'>
- <credentials>
- <credential name='hive_src_credentials' type='hcat'>
- <property>
- <name>hcat.metastore.uri</name>
- <value>${sourceMetastoreUri}</value>
- </property>
- <property>
- <name>hcat.metastore.principal</name>
- <value>${sourceHiveMetastoreKerberosPrincipal}</value>
- </property>
- </credential>
- <credential name='hive_tgt_credentials' type='hcat'>
- <property>
- <name>hcat.metastore.uri</name>
- <value>${targetMetastoreUri}</value>
- </property>
- <property>
- <name>hcat.metastore.principal</name>
- <value>${targetHiveMetastoreKerberosPrincipal}</value>
- </property>
- </credential>
- <credential name="hive2_src_credentials" type="hive2">
- <property>
- <name>hive2.server.principal</name>
- <value>${sourceHive2KerberosPrincipal}</value>
- </property>
- <property>
- <name>hive2.jdbc.url</name>
- <value>jdbc:${sourceHiveServer2Uri}/${sourceDatabase}</value>
- </property>
- </credential>
- <credential name="hive2_tgt_credentials" type="hive2">
- <property>
- <name>hive2.server.principal</name>
- <value>${targetHive2KerberosPrincipal}</value>
- </property>
- <property>
- <name>hive2.jdbc.url</name>
- <value>jdbc:${targetHiveServer2Uri}/${sourceDatabase}</value>
- </property>
- </credential>
- </credentials>
- <start to='last-event'/>
- <action name="last-event" cred="hive_tgt_credentials">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp,hive,hive2,hcatalog</value>
- </property>
- <property>
- <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
- <value>${sourceNN},${targetNN}</value>
- </property>
- <property>
- <name>mapreduce.job.hdfs-servers</name>
- <value>${sourceNN},${targetNN}</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-falconLibPath</arg>
- <arg>${wf:conf("falcon.libpath")}</arg>
- <arg>-sourceCluster</arg>
- <arg>${sourceCluster}</arg>
- <arg>-sourceMetastoreUri</arg>
- <arg>${sourceMetastoreUri}</arg>
- <arg>-sourceHiveServer2Uri</arg>
- <arg>${sourceHiveServer2Uri}</arg>
- <arg>-sourceDatabase</arg>
- <arg>${sourceDatabase}</arg>
- <arg>-sourceTable</arg>
- <arg>${sourceTable}</arg>
- <arg>-sourceStagingPath</arg>
- <arg>${sourceStagingPath}</arg>
- <arg>-sourceNN</arg>
- <arg>${sourceNN}</arg>
- <arg>-sourceNNKerberosPrincipal</arg>
- <arg>${sourceNNKerberosPrincipal}</arg>
- <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
- <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
- <arg>-sourceHive2KerberosPrincipal</arg>
- <arg>${sourceHive2KerberosPrincipal}</arg>
- <arg>-targetCluster</arg>
- <arg>${targetCluster}</arg>
- <arg>-targetMetastoreUri</arg>
- <arg>${targetMetastoreUri}</arg>
- <arg>-targetHiveServer2Uri</arg>
- <arg>${targetHiveServer2Uri}</arg>
- <arg>-targetStagingPath</arg>
- <arg>${targetStagingPath}</arg>
- <arg>-targetNN</arg>
- <arg>${targetNN}</arg>
- <arg>-targetNNKerberosPrincipal</arg>
- <arg>${targetNNKerberosPrincipal}</arg>
- <arg>-targetHiveMetastoreKerberosPrincipal</arg>
- <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
- <arg>-targetHive2KerberosPrincipal</arg>
- <arg>${targetHive2KerberosPrincipal}</arg>
- <arg>-maxEvents</arg>
- <arg>${maxEvents}</arg>
- <arg>-clusterForJobRun</arg>
- <arg>${clusterForJobRun}</arg>
- <arg>-clusterForJobRunWriteEP</arg>
- <arg>${clusterForJobRunWriteEP}</arg>
- <arg>-clusterForJobNNKerberosPrincipal</arg>
- <arg>${clusterForJobNNKerberosPrincipal}</arg>
- <arg>-drJobName</arg>
- <arg>${drJobName}-${nominalTime}</arg>
- <arg>-executionStage</arg>
- <arg>lastevents</arg>
- </java>
- <ok to="export-dr-replication"/>
- <error to="fail"/>
- </action>
- <!-- Export Replication action -->
- <action name="export-dr-replication" cred="hive_src_credentials,hive2_src_credentials">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp,hive,hive2,hcatalog</value>
- </property>
- <property>
- <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
- <value>${sourceNN},${targetNN}</value>
- </property>
- <property>
- <name>mapreduce.job.hdfs-servers</name>
- <value>${sourceNN},${targetNN}</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-falconLibPath</arg>
- <arg>${wf:conf("falcon.libpath")}</arg>
- <arg>-replicationMaxMaps</arg>
- <arg>${replicationMaxMaps}</arg>
- <arg>-distcpMaxMaps</arg>
- <arg>${distcpMaxMaps}</arg>
- <arg>-sourceCluster</arg>
- <arg>${sourceCluster}</arg>
- <arg>-sourceMetastoreUri</arg>
- <arg>${sourceMetastoreUri}</arg>
- <arg>-sourceHiveServer2Uri</arg>
- <arg>${sourceHiveServer2Uri}</arg>
- <arg>-sourceDatabase</arg>
- <arg>${sourceDatabase}</arg>
- <arg>-sourceTable</arg>
- <arg>${sourceTable}</arg>
- <arg>-sourceStagingPath</arg>
- <arg>${sourceStagingPath}</arg>
- <arg>-sourceNN</arg>
- <arg>${sourceNN}</arg>
- <arg>-sourceNNKerberosPrincipal</arg>
- <arg>${sourceNNKerberosPrincipal}</arg>
- <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
- <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
- <arg>-sourceHive2KerberosPrincipal</arg>
- <arg>${sourceHive2KerberosPrincipal}</arg>
- <arg>-targetCluster</arg>
- <arg>${targetCluster}</arg>
- <arg>-targetMetastoreUri</arg>
- <arg>${targetMetastoreUri}</arg>
- <arg>-targetHiveServer2Uri</arg>
- <arg>${targetHiveServer2Uri}</arg>
- <arg>-targetStagingPath</arg>
- <arg>${targetStagingPath}</arg>
- <arg>-targetNN</arg>
- <arg>${targetNN}</arg>
- <arg>-targetNNKerberosPrincipal</arg>
- <arg>${targetNNKerberosPrincipal}</arg>
- <arg>-targetHiveMetastoreKerberosPrincipal</arg>
- <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
- <arg>-targetHive2KerberosPrincipal</arg>
- <arg>${targetHive2KerberosPrincipal}</arg>
- <arg>-maxEvents</arg>
- <arg>${maxEvents}</arg>
- <arg>-distcpMapBandwidth</arg>
- <arg>${distcpMapBandwidth}</arg>
- <arg>-clusterForJobRun</arg>
- <arg>${clusterForJobRun}</arg>
- <arg>-clusterForJobRunWriteEP</arg>
- <arg>${clusterForJobRunWriteEP}</arg>
- <arg>-clusterForJobNNKerberosPrincipal</arg>
- <arg>${clusterForJobNNKerberosPrincipal}</arg>
- <arg>-drJobName</arg>
- <arg>${drJobName}-${nominalTime}</arg>
- <arg>-executionStage</arg>
- <arg>export</arg>
- <arg>-counterLogDir</arg>
- <arg>${logDir}/job-${nominalTime}/${srcClusterName == 'NA' ? '' : srcClusterName}/</arg>
- </java>
- <ok to="import-dr-replication"/>
- <error to="fail"/>
- </action>
- <!-- Import Replication action -->
- <action name="import-dr-replication" cred="hive_tgt_credentials,hive2_tgt_credentials">
- <java>
- <job-tracker>${jobTracker}</job-tracker>
- <name-node>${nameNode}</name-node>
- <configuration>
- <property> <!-- hadoop 2 parameter -->
- <name>oozie.launcher.mapreduce.job.user.classpath.first</name>
- <value>true</value>
- </property>
- <property>
- <name>mapred.job.queue.name</name>
- <value>${queueName}</value>
- </property>
- <property>
- <name>oozie.launcher.mapred.job.priority</name>
- <value>${jobPriority}</value>
- </property>
- <property>
- <name>oozie.use.system.libpath</name>
- <value>true</value>
- </property>
- <property>
- <name>oozie.action.sharelib.for.java</name>
- <value>distcp,hive,hive2,hcatalog</value>
- </property>
- <property>
- <name>oozie.launcher.mapreduce.job.hdfs-servers</name>
- <value>${sourceNN},${targetNN}</value>
- </property>
- <property>
- <name>mapreduce.job.hdfs-servers</name>
- <value>${sourceNN},${targetNN}</value>
- </property>
- </configuration>
- <main-class>org.apache.falcon.hive.HiveDRTool</main-class>
- <arg>-Dmapred.job.queue.name=${queueName}</arg>
- <arg>-Dmapred.job.priority=${jobPriority}</arg>
- <arg>-falconLibPath</arg>
- <arg>${wf:conf("falcon.libpath")}</arg>
- <arg>-replicationMaxMaps</arg>
- <arg>${replicationMaxMaps}</arg>
- <arg>-distcpMaxMaps</arg>
- <arg>${distcpMaxMaps}</arg>
- <arg>-sourceCluster</arg>
- <arg>${sourceCluster}</arg>
- <arg>-sourceMetastoreUri</arg>
- <arg>${sourceMetastoreUri}</arg>
- <arg>-sourceHiveServer2Uri</arg>
- <arg>${sourceHiveServer2Uri}</arg>
- <arg>-sourceDatabase</arg>
- <arg>${sourceDatabase}</arg>
- <arg>-sourceTable</arg>
- <arg>${sourceTable}</arg>
- <arg>-sourceStagingPath</arg>
- <arg>${sourceStagingPath}</arg>
- <arg>-sourceNN</arg>
- <arg>${sourceNN}</arg>
- <arg>-sourceNNKerberosPrincipal</arg>
- <arg>${sourceNNKerberosPrincipal}</arg>
- <arg>-sourceHiveMetastoreKerberosPrincipal</arg>
- <arg>${sourceHiveMetastoreKerberosPrincipal}</arg>
- <arg>-sourceHive2KerberosPrincipal</arg>
- <arg>${sourceHive2KerberosPrincipal}</arg>
- <arg>-targetCluster</arg>
- <arg>${targetCluster}</arg>
- <arg>-targetMetastoreUri</arg>
- <arg>${targetMetastoreUri}</arg>
- <arg>-targetHiveServer2Uri</arg>
- <arg>${targetHiveServer2Uri}</arg>
- <arg>-targetStagingPath</arg>
- <arg>${targetStagingPath}</arg>
- <arg>-targetNN</arg>
- <arg>${targetNN}</arg>
- <arg>-targetNNKerberosPrincipal</arg>
- <arg>${targetNNKerberosPrincipal}</arg>
- <arg>-targetHiveMetastoreKerberosPrincipal</arg>
- <arg>${targetHiveMetastoreKerberosPrincipal}</arg>
- <arg>-targetHive2KerberosPrincipal</arg>
- <arg>${targetHive2KerberosPrincipal}</arg>
- <arg>-maxEvents</arg>
- <arg>${maxEvents}</arg>
- <arg>-distcpMapBandwidth</arg>
- <arg>${distcpMapBandwidth}</arg>
- <arg>-clusterForJobRun</arg>
- <arg>${clusterForJobRun}</arg>
- <arg>-clusterForJobRunWriteEP</arg>
- <arg>${clusterForJobRunWriteEP}</arg>
- <arg>-clusterForJobNNKerberosPrincipal</arg>
- <arg>${clusterForJobNNKerberosPrincipal}</arg>
- <arg>-drJobName</arg>
- <arg>${drJobName}-${nominalTime}</arg>
- <arg>-executionStage</arg>
- <arg>import</arg>
- </java>
- <ok to="end"/>
- <error to="fail"/>
- </action>
- <kill name="fail">
- <message>
- Workflow action failed, error message [${wf:errorMessage(wf:lastErrorNode())}]
- </message>
- </kill>
- <end name="end"/>
-</workflow-app>
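The secure workflow deleted above wires Kerberos into every step: hcat credentials cover the source and target metastores, hive2 credentials cover both HiveServer2 endpoints, and each action attaches only the credentials it actually needs (target-side for last-event, source-side for export, target-side for import). All three java actions launch org.apache.falcon.hive.HiveDRTool with the same connection and principal settings, passed as alternating flag/value <arg> pairs; only the -executionStage argument changes (lastevents, then export, then import), and any failure routes to the fail kill node. A hedged sketch of how such paired arguments are conventionally folded into a map and dispatched; this is illustrative and is not HiveDRTool's actual option parser:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative only: consumes alternating "-flag value" pairs laid out the
    // way the workflow's <arg> elements are, then branches on -executionStage.
    public final class StageDispatch {
        public static void main(String[] args) {
            Map<String, String> opts = new HashMap<>();
            int i = 0;
            while (i < args.length) {
                if (args[i].startsWith("-D")) { i++; continue; } // -Dkey=value is a single token
                if (i + 1 < args.length) {
                    opts.put(args[i].substring(1), args[i + 1]); // strip leading '-'
                }
                i += 2;
            }
            switch (opts.getOrDefault("executionStage", "")) {
                case "lastevents": // record the last event already replicated to the target
                case "export":     // export new events on the source and copy them across
                case "import":     // replay the copied events on the target
                    System.out.println("stage: " + opts.get("executionStage"));
                    break;
                default:
                    throw new IllegalArgumentException("unknown -executionStage");
            }
        }
    }

Splitting the stages across separate Oozie actions is also what makes the per-action cred attribute work on a secure cluster: the export stage authenticates only against source-side services, and the import stage only against target-side ones.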